xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0 OR MIT
2*4882a593Smuzhiyun /**************************************************************************
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * Permission is hereby granted, free of charge, to any person obtaining a
7*4882a593Smuzhiyun  * copy of this software and associated documentation files (the
8*4882a593Smuzhiyun  * "Software"), to deal in the Software without restriction, including
9*4882a593Smuzhiyun  * without limitation the rights to use, copy, modify, merge, publish,
10*4882a593Smuzhiyun  * distribute, sub license, and/or sell copies of the Software, and to
11*4882a593Smuzhiyun  * permit persons to whom the Software is furnished to do so, subject to
12*4882a593Smuzhiyun  * the following conditions:
13*4882a593Smuzhiyun  *
14*4882a593Smuzhiyun  * The above copyright notice and this permission notice (including the
15*4882a593Smuzhiyun  * next paragraph) shall be included in all copies or substantial portions
16*4882a593Smuzhiyun  * of the Software.
17*4882a593Smuzhiyun  *
18*4882a593Smuzhiyun  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19*4882a593Smuzhiyun  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20*4882a593Smuzhiyun  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21*4882a593Smuzhiyun  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22*4882a593Smuzhiyun  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23*4882a593Smuzhiyun  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24*4882a593Smuzhiyun  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25*4882a593Smuzhiyun  *
26*4882a593Smuzhiyun  **************************************************************************/
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun #include <drm/ttm/ttm_placement.h>
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun #include "vmwgfx_drv.h"
31*4882a593Smuzhiyun #include "vmwgfx_resource_priv.h"
32*4882a593Smuzhiyun #include "vmwgfx_binding.h"
33*4882a593Smuzhiyun 
/**
 * struct vmw_shader - Resource-manager representation of a guest-backed
 * (non-DX) shader.
 *
 * @res: Embedded struct vmw_resource; the shader is tracked through this.
 * @type: The SVGA3d shader type.
 * @size: Size of the shader byte code in bytes.
 * @num_input_sig: Number of input signatures (as supplied at creation).
 * @num_output_sig: Number of output signatures (as supplied at creation).
 */
struct vmw_shader {
	struct vmw_resource res;
	SVGA3dShaderType type;
	uint32_t size;
	uint8_t num_input_sig;
	uint8_t num_output_sig;
};
41*4882a593Smuzhiyun 
/**
 * struct vmw_user_shader - User-space visible shader object.
 *
 * @base: The TTM base object providing the user-space handle.
 * @shader: The embedded struct vmw_shader.
 */
struct vmw_user_shader {
	struct ttm_base_object base;
	struct vmw_shader shader;
};
46*4882a593Smuzhiyun 
/**
 * struct vmw_dx_shader - Resource-manager representation of a DX shader.
 *
 * @res: Embedded struct vmw_resource.
 * @ctx: The context resource this shader was created for.
 * @cotable: The DX shader cotable of @ctx (see vmw_dx_shader_add()).
 * @id: The user-supplied shader id, also used as the device shader id.
 * @committed: Whether the shader creation has been committed to hardware.
 * @cotable_head: List head linking this shader into the cotable's
 * resource list.
 */
struct vmw_dx_shader {
	struct vmw_resource res;
	struct vmw_resource *ctx;
	struct vmw_resource *cotable;
	u32 id;
	bool committed;
	struct list_head cotable_head;
};
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun static uint64_t vmw_user_shader_size;
57*4882a593Smuzhiyun static uint64_t vmw_shader_size;
58*4882a593Smuzhiyun static size_t vmw_shader_dx_size;
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun static void vmw_user_shader_free(struct vmw_resource *res);
61*4882a593Smuzhiyun static struct vmw_resource *
62*4882a593Smuzhiyun vmw_user_shader_base_to_res(struct ttm_base_object *base);
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun static int vmw_gb_shader_create(struct vmw_resource *res);
65*4882a593Smuzhiyun static int vmw_gb_shader_bind(struct vmw_resource *res,
66*4882a593Smuzhiyun 			       struct ttm_validate_buffer *val_buf);
67*4882a593Smuzhiyun static int vmw_gb_shader_unbind(struct vmw_resource *res,
68*4882a593Smuzhiyun 				 bool readback,
69*4882a593Smuzhiyun 				 struct ttm_validate_buffer *val_buf);
70*4882a593Smuzhiyun static int vmw_gb_shader_destroy(struct vmw_resource *res);
71*4882a593Smuzhiyun 
72*4882a593Smuzhiyun static int vmw_dx_shader_create(struct vmw_resource *res);
73*4882a593Smuzhiyun static int vmw_dx_shader_bind(struct vmw_resource *res,
74*4882a593Smuzhiyun 			       struct ttm_validate_buffer *val_buf);
75*4882a593Smuzhiyun static int vmw_dx_shader_unbind(struct vmw_resource *res,
76*4882a593Smuzhiyun 				 bool readback,
77*4882a593Smuzhiyun 				 struct ttm_validate_buffer *val_buf);
78*4882a593Smuzhiyun static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
79*4882a593Smuzhiyun 					enum vmw_cmdbuf_res_state state);
80*4882a593Smuzhiyun static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type);
81*4882a593Smuzhiyun static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type);
82*4882a593Smuzhiyun static uint64_t vmw_user_shader_size;
83*4882a593Smuzhiyun 
/*
 * Conversion helpers used by the generic user-resource code to go from a
 * user-space shader base object to its resource, and to free it.
 */
static const struct vmw_user_resource_conv user_shader_conv = {
	.object_type = VMW_RES_SHADER,
	.base_obj_to_res = vmw_user_shader_base_to_res,
	.res_free = vmw_user_shader_free
};

const struct vmw_user_resource_conv *user_shader_converter =
	&user_shader_conv;
92*4882a593Smuzhiyun 
93*4882a593Smuzhiyun 
/* Resource-type callbacks for guest-backed (non-DX) shaders. */
static const struct vmw_res_func vmw_gb_shader_func = {
	.res_type = vmw_res_shader,
	.needs_backup = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "guest backed shaders",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_shader_create,
	.destroy = vmw_gb_shader_destroy,
	.bind = vmw_gb_shader_bind,
	.unbind = vmw_gb_shader_unbind
};
107*4882a593Smuzhiyun 
/* Resource-type callbacks for DX shaders. */
static const struct vmw_res_func vmw_dx_shader_func = {
	.res_type = vmw_res_shader,
	.needs_backup = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "dx shaders",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_dx_shader_create,
	/*
	 * The destroy callback is only called with a committed resource on
	 * context destroy, in which case we destroy the cotable anyway,
	 * so there's no need to destroy DX shaders separately.
	 */
	.destroy = NULL,
	.bind = vmw_dx_shader_bind,
	.unbind = vmw_dx_shader_unbind,
	.commit_notify = vmw_dx_shader_commit_notify,
};
127*4882a593Smuzhiyun 
/*
 * Shader management:
 */

/**
 * vmw_res_to_shader - typecast a struct vmw_resource to a
 * struct vmw_shader
 *
 * @res: Pointer to the struct vmw_resource.
 */
static inline struct vmw_shader *
vmw_res_to_shader(struct vmw_resource *res)
{
	return container_of(res, struct vmw_shader, res);
}
137*4882a593Smuzhiyun 
/**
 * vmw_res_to_dx_shader - typecast a struct vmw_resource to a
 * struct vmw_dx_shader
 *
 * @res: Pointer to the struct vmw_resource.
 *
 * Return: Pointer to the enclosing struct vmw_dx_shader.
 */
static inline struct vmw_dx_shader *
vmw_res_to_dx_shader(struct vmw_resource *res)
{
	return container_of(res, struct vmw_dx_shader, res);
}
149*4882a593Smuzhiyun 
vmw_hw_shader_destroy(struct vmw_resource * res)150*4882a593Smuzhiyun static void vmw_hw_shader_destroy(struct vmw_resource *res)
151*4882a593Smuzhiyun {
152*4882a593Smuzhiyun 	if (likely(res->func->destroy))
153*4882a593Smuzhiyun 		(void) res->func->destroy(res);
154*4882a593Smuzhiyun 	else
155*4882a593Smuzhiyun 		res->id = -1;
156*4882a593Smuzhiyun }
157*4882a593Smuzhiyun 
158*4882a593Smuzhiyun 
/**
 * vmw_gb_shader_init - Initialize a guest-backed shader resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The shader resource to initialize.
 * @size: Size of the shader byte code in bytes.
 * @offset: Offset of the byte code within @byte_code.
 * @type: The SVGA3d shader type.
 * @num_input_sig: Number of input signatures.
 * @num_output_sig: Number of output signatures.
 * @byte_code: Optional buffer object holding the shader byte code; when
 * non-NULL a reference is taken and it becomes the resource backup.
 * @res_free: Destructor for @res; kfree() is used when NULL.
 *
 * On failure this function frees @res (via @res_free or kfree()), so the
 * caller must not touch @res after a non-zero return.
 *
 * Return: 0 on success, negative error code from vmw_resource_init()
 * otherwise.
 */
static int vmw_gb_shader_init(struct vmw_private *dev_priv,
			      struct vmw_resource *res,
			      uint32_t size,
			      uint64_t offset,
			      SVGA3dShaderType type,
			      uint8_t num_input_sig,
			      uint8_t num_output_sig,
			      struct vmw_buffer_object *byte_code,
			      void (*res_free) (struct vmw_resource *res))
{
	struct vmw_shader *shader = vmw_res_to_shader(res);
	int ret;

	ret = vmw_resource_init(dev_priv, res, true, res_free,
				&vmw_gb_shader_func);

	if (unlikely(ret != 0)) {
		/* Init failed: we own the cleanup of @res here. */
		if (res_free)
			res_free(res);
		else
			kfree(res);
		return ret;
	}

	res->backup_size = size;
	if (byte_code) {
		res->backup = vmw_bo_reference(byte_code);
		res->backup_offset = offset;
	}
	shader->size = size;
	shader->type = type;
	shader->num_input_sig = num_input_sig;
	shader->num_output_sig = num_output_sig;

	res->hw_destroy = vmw_hw_shader_destroy;
	return 0;
}
196*4882a593Smuzhiyun 
/*
 * GB shader code:
 */

/**
 * vmw_gb_shader_create - Create a guest-backed shader on the device.
 *
 * @res: The shader resource.
 *
 * Allocates a device id for the resource and emits an
 * SVGA_3D_CMD_DEFINE_GB_SHADER command. A no-op if the resource already
 * has a device id.
 *
 * Return: 0 on success, -EBUSY if the shader id limit is exceeded,
 * -ENOMEM if the FIFO reservation fails, or the error from
 * vmw_resource_alloc_id().
 */
static int vmw_gb_shader_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_shader *shader = vmw_res_to_shader(res);
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBShader body;
	} *cmd;

	/* Already created on the device. */
	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a shader id.\n");
		goto out_no_id;
	}

	/* The device supports only a limited number of GB shader ids. */
	if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	cmd->body.type = shader->type;
	cmd->body.sizeInBytes = shader->size;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}
246*4882a593Smuzhiyun 
/**
 * vmw_gb_shader_bind - Bind the shader's backup MOB to the device shader.
 *
 * @res: The shader resource.
 * @val_buf: The validated backup buffer.
 *
 * Emits an SVGA_3D_CMD_BIND_GB_SHADER command pointing the device shader
 * at the backup MOB, and clears the resource's backup-dirty flag.
 *
 * Return: 0 on success, -ENOMEM if the FIFO reservation fails.
 */
static int vmw_gb_shader_bind(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	/* The backup buffer must have been validated into MOB memory. */
	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.offsetInBytes = res->backup_offset;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}
273*4882a593Smuzhiyun 
/**
 * vmw_gb_shader_unbind - Unbind the shader's MOB and fence the backup.
 *
 * @res: The shader resource.
 * @readback: Unused for GB shaders.
 * @val_buf: The backup buffer to fence.
 *
 * Emits a bind command with SVGA3D_INVALID_ID as the mob id, then fences
 * the backup buffer so it is not reused while the device may still be
 * accessing it.
 *
 * Return: 0 on success, -ENOMEM if the FIFO reservation fails.
 */
static int vmw_gb_shader_unbind(struct vmw_resource *res,
				bool readback,
				struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;
	struct vmw_fence_obj *fence;

	BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	cmd->body.mobid = SVGA3D_INVALID_ID;
	cmd->body.offsetInBytes = 0;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	/* vmw_bo_fence_single() handles a NULL fence gracefully. */
	vmw_bo_fence_single(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
312*4882a593Smuzhiyun 
/**
 * vmw_gb_shader_destroy - Destroy a guest-backed shader on the device.
 *
 * @res: The shader resource.
 *
 * Scrubs any bindings referencing this shader (under the binding mutex),
 * emits SVGA_3D_CMD_DESTROY_GB_SHADER and releases the device id.
 * A no-op if the resource has no device id.
 *
 * Return: 0 on success, -ENOMEM if the FIFO reservation fails.
 */
static int vmw_gb_shader_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBShader body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	mutex_lock(&dev_priv->binding_mutex);
	vmw_binding_res_list_scrub(&res->binding_head);

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	mutex_unlock(&dev_priv->binding_mutex);
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}
343*4882a593Smuzhiyun 
344*4882a593Smuzhiyun /*
345*4882a593Smuzhiyun  * DX shader code:
346*4882a593Smuzhiyun  */
347*4882a593Smuzhiyun 
348*4882a593Smuzhiyun /**
349*4882a593Smuzhiyun  * vmw_dx_shader_commit_notify - Notify that a shader operation has been
350*4882a593Smuzhiyun  * committed to hardware from a user-supplied command stream.
351*4882a593Smuzhiyun  *
352*4882a593Smuzhiyun  * @res: Pointer to the shader resource.
353*4882a593Smuzhiyun  * @state: Indicating whether a creation or removal has been committed.
354*4882a593Smuzhiyun  *
355*4882a593Smuzhiyun  */
vmw_dx_shader_commit_notify(struct vmw_resource * res,enum vmw_cmdbuf_res_state state)356*4882a593Smuzhiyun static void vmw_dx_shader_commit_notify(struct vmw_resource *res,
357*4882a593Smuzhiyun 					enum vmw_cmdbuf_res_state state)
358*4882a593Smuzhiyun {
359*4882a593Smuzhiyun 	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
360*4882a593Smuzhiyun 	struct vmw_private *dev_priv = res->dev_priv;
361*4882a593Smuzhiyun 
362*4882a593Smuzhiyun 	if (state == VMW_CMDBUF_RES_ADD) {
363*4882a593Smuzhiyun 		mutex_lock(&dev_priv->binding_mutex);
364*4882a593Smuzhiyun 		vmw_cotable_add_resource(shader->cotable,
365*4882a593Smuzhiyun 					 &shader->cotable_head);
366*4882a593Smuzhiyun 		shader->committed = true;
367*4882a593Smuzhiyun 		res->id = shader->id;
368*4882a593Smuzhiyun 		mutex_unlock(&dev_priv->binding_mutex);
369*4882a593Smuzhiyun 	} else {
370*4882a593Smuzhiyun 		mutex_lock(&dev_priv->binding_mutex);
371*4882a593Smuzhiyun 		list_del_init(&shader->cotable_head);
372*4882a593Smuzhiyun 		shader->committed = false;
373*4882a593Smuzhiyun 		res->id = -1;
374*4882a593Smuzhiyun 		mutex_unlock(&dev_priv->binding_mutex);
375*4882a593Smuzhiyun 	}
376*4882a593Smuzhiyun }
377*4882a593Smuzhiyun 
/**
 * vmw_dx_shader_unscrub - Have the device reattach a MOB to a DX shader.
 *
 * @res: The shader resource
 *
 * This function reverts a scrub operation. Both callers in this file
 * hold dev_priv->binding_mutex around the call.
 *
 * Return: 0 on success (or if there is nothing to do), -ENOMEM if the
 * FIFO reservation fails.
 */
static int vmw_dx_shader_unscrub(struct vmw_resource *res)
{
	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindShader body;
	} *cmd;

	/* Nothing to do unless the shader is committed and scrubbed
	 * (scrubbed == removed from the cotable resource list). */
	if (!list_empty(&shader->cotable_head) || !shader->committed)
		return 0;

	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), shader->ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = shader->ctx->id;
	cmd->body.shid = shader->id;
	cmd->body.mobid = res->backup->base.mem.start;
	cmd->body.offsetInBytes = res->backup_offset;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	vmw_cotable_add_resource(shader->cotable, &shader->cotable_head);

	return 0;
}
413*4882a593Smuzhiyun 
/**
 * vmw_dx_shader_create - The DX shader create callback
 *
 * @res: The DX shader resource
 *
 * The create callback is called as part of resource validation and
 * makes sure that we unscrub the shader if it's previously been scrubbed.
 *
 * Return: 0 on success, or the error from vmw_dx_shader_unscrub().
 */
static int vmw_dx_shader_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
	int ret = 0;

	WARN_ON_ONCE(!shader->committed);

	/* Only unscrub if a MOB is currently attached to the resource. */
	if (vmw_resource_mob_attached(res)) {
		mutex_lock(&dev_priv->binding_mutex);
		ret = vmw_dx_shader_unscrub(res);
		mutex_unlock(&dev_priv->binding_mutex);
	}

	res->id = shader->id;
	return ret;
}
439*4882a593Smuzhiyun 
/**
 * vmw_dx_shader_bind - The DX shader bind callback
 *
 * @res: The DX shader resource
 * @val_buf: Pointer to the validate buffer.
 *
 * Reattaches the MOB to the shader by unscrubbing under the binding
 * mutex. Always returns 0; any unscrub failure is intentionally ignored
 * here.
 */
static int vmw_dx_shader_bind(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;

	/* The backup buffer must have been validated into MOB memory. */
	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
	mutex_lock(&dev_priv->binding_mutex);
	vmw_dx_shader_unscrub(res);
	mutex_unlock(&dev_priv->binding_mutex);

	return 0;
}
460*4882a593Smuzhiyun 
/**
 * vmw_dx_shader_scrub - Have the device unbind a MOB from a DX shader.
 *
 * @res: The shader resource
 *
 * This function unbinds a MOB from the DX shader without requiring the
 * MOB dma_buffer to be reserved. The driver still considers the MOB bound.
 * However, once the driver eventually decides to unbind the MOB, it doesn't
 * need to access the context.
 *
 * Return: 0 on success (or if already scrubbed), -ENOMEM if the FIFO
 * reservation fails.
 */
static int vmw_dx_shader_scrub(struct vmw_resource *res)
{
	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindShader body;
	} *cmd;

	/* Already scrubbed: not on the cotable resource list. */
	if (list_empty(&shader->cotable_head))
		return 0;

	WARN_ON_ONCE(!shader->committed);
	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = shader->ctx->id;
	cmd->body.shid = res->id;
	cmd->body.mobid = SVGA3D_INVALID_ID;
	cmd->body.offsetInBytes = 0;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	res->id = -1;
	list_del_init(&shader->cotable_head);

	return 0;
}
500*4882a593Smuzhiyun 
/**
 * vmw_dx_shader_unbind - The dx shader unbind callback.
 *
 * @res: The shader resource
 * @readback: Whether this is a readback unbind. Currently unused.
 * @val_buf: MOB buffer information.
 *
 * Scrubs the shader under the binding mutex, then fences the MOB buffer
 * so it is not reused while the device may still be accessing it.
 *
 * Return: 0 on success, or the error from vmw_dx_shader_scrub().
 */
static int vmw_dx_shader_unbind(struct vmw_resource *res,
				bool readback,
				struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_fence_obj *fence;
	int ret;

	BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	ret = vmw_dx_shader_scrub(res);
	mutex_unlock(&dev_priv->binding_mutex);

	if (ret)
		return ret;

	/* Fence the MOB buffer; a NULL fence is handled gracefully. */
	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);
	vmw_bo_fence_single(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
534*4882a593Smuzhiyun 
/**
 * vmw_dx_shader_cotable_list_scrub - The cotable unbind_func callback for
 * DX shaders.
 *
 * @dev_priv: Pointer to device private structure.
 * @list: The list of cotable resources.
 * @readback: Whether the call was part of a readback unbind.
 *
 * Scrubs all shader MOBs so that any subsequent shader unbind or shader
 * destroy operation won't need to swap in the context.
 *
 * Must be called with dev_priv->binding_mutex held (asserted below).
 */
void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
				      struct list_head *list,
				      bool readback)
{
	struct vmw_dx_shader *entry, *next;

	lockdep_assert_held_once(&dev_priv->binding_mutex);

	/* Safe iteration: vmw_dx_shader_scrub() removes entries from the
	 * cotable list as it goes. */
	list_for_each_entry_safe(entry, next, list, cotable_head) {
		WARN_ON(vmw_dx_shader_scrub(&entry->res));
		if (!readback)
			entry->committed = false;
	}
}
560*4882a593Smuzhiyun 
/**
 * vmw_dx_shader_res_free - The DX shader free callback
 *
 * @res: The shader resource
 *
 * Frees the DX shader resource and updates memory accounting.
 * Releases the cotable reference taken in vmw_dx_shader_add() and
 * returns the vmw_shader_dx_size accounting charge.
 */
static void vmw_dx_shader_res_free(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);

	vmw_resource_unreference(&shader->cotable);
	kfree(shader);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
}
577*4882a593Smuzhiyun 
/**
 * vmw_dx_shader_add - Add a shader resource as a command buffer managed
 * resource.
 *
 * @man: The command buffer resource manager.
 * @ctx: Pointer to the context resource.
 * @user_key: The id used for this shader.
 * @shader_type: The shader type.
 * @list: The list of staged command buffer managed resources.
 *
 * Allocates and initializes a struct vmw_dx_shader, charges it to the
 * TTM memory-accounting system, and stages it with the command buffer
 * resource manager. The local reference is dropped before return; on
 * failure the res_free callback performs the cleanup.
 *
 * Return: 0 on success, -EINVAL for an invalid shader id, -ENOMEM on
 * allocation failure, or an error from the called helpers.
 */
int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
		      struct vmw_resource *ctx,
		      u32 user_key,
		      SVGA3dShaderType shader_type,
		      struct list_head *list)
{
	struct vmw_dx_shader *shader;
	struct vmw_resource *res;
	struct vmw_private *dev_priv = ctx->dev_priv;
	struct ttm_operation_ctx ttm_opt_ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	int ret;

	/* Lazily compute the accounting size for a DX shader. */
	if (!vmw_shader_dx_size)
		vmw_shader_dx_size = ttm_round_pot(sizeof(*shader));

	if (!vmw_shader_id_ok(user_key, shader_type))
		return -EINVAL;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size,
				   &ttm_opt_ctx);
	if (ret) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for shader "
				  "creation.\n");
		return ret;
	}

	shader = kmalloc(sizeof(*shader), GFP_KERNEL);
	if (!shader) {
		/* Undo the accounting charge taken above. */
		ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
		return -ENOMEM;
	}

	res = &shader->res;
	shader->ctx = ctx;
	shader->cotable = vmw_resource_reference
		(vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER));
	shader->id = user_key;
	shader->committed = false;
	INIT_LIST_HEAD(&shader->cotable_head);
	ret = vmw_resource_init(dev_priv, res, true,
				vmw_dx_shader_res_free, &vmw_dx_shader_func);
	if (ret)
		goto out_resource_init;

	/*
	 * The user_key name-space is not per shader type for DX shaders,
	 * so when hashing, use a single zero shader type.
	 */
	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
				 vmw_shader_key(user_key, 0),
				 res, list);
	if (ret)
		goto out_resource_init;

	res->id = shader->id;
	res->hw_destroy = vmw_hw_shader_destroy;

out_resource_init:
	/* Drop the local reference; the staged list holds its own. */
	vmw_resource_unreference(&res);

	return ret;
}
654*4882a593Smuzhiyun 
655*4882a593Smuzhiyun 
656*4882a593Smuzhiyun 
657*4882a593Smuzhiyun /**
658*4882a593Smuzhiyun  * User-space shader management:
659*4882a593Smuzhiyun  */
660*4882a593Smuzhiyun 
661*4882a593Smuzhiyun static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object * base)662*4882a593Smuzhiyun vmw_user_shader_base_to_res(struct ttm_base_object *base)
663*4882a593Smuzhiyun {
664*4882a593Smuzhiyun 	return &(container_of(base, struct vmw_user_shader, base)->
665*4882a593Smuzhiyun 		 shader.res);
666*4882a593Smuzhiyun }
667*4882a593Smuzhiyun 
vmw_user_shader_free(struct vmw_resource * res)668*4882a593Smuzhiyun static void vmw_user_shader_free(struct vmw_resource *res)
669*4882a593Smuzhiyun {
670*4882a593Smuzhiyun 	struct vmw_user_shader *ushader =
671*4882a593Smuzhiyun 		container_of(res, struct vmw_user_shader, shader.res);
672*4882a593Smuzhiyun 	struct vmw_private *dev_priv = res->dev_priv;
673*4882a593Smuzhiyun 
674*4882a593Smuzhiyun 	ttm_base_object_kfree(ushader, base);
675*4882a593Smuzhiyun 	ttm_mem_global_free(vmw_mem_glob(dev_priv),
676*4882a593Smuzhiyun 			    vmw_user_shader_size);
677*4882a593Smuzhiyun }
678*4882a593Smuzhiyun 
vmw_shader_free(struct vmw_resource * res)679*4882a593Smuzhiyun static void vmw_shader_free(struct vmw_resource *res)
680*4882a593Smuzhiyun {
681*4882a593Smuzhiyun 	struct vmw_shader *shader = vmw_res_to_shader(res);
682*4882a593Smuzhiyun 	struct vmw_private *dev_priv = res->dev_priv;
683*4882a593Smuzhiyun 
684*4882a593Smuzhiyun 	kfree(shader);
685*4882a593Smuzhiyun 	ttm_mem_global_free(vmw_mem_glob(dev_priv),
686*4882a593Smuzhiyun 			    vmw_shader_size);
687*4882a593Smuzhiyun }
688*4882a593Smuzhiyun 
/**
 * vmw_user_shader_base_release - TTM base object release callback.
 *
 * @p_base: Double pointer to the base object; cleared on return.
 *
 * Called when user space has no more references on the base object.
 * It releases the base-object's reference on the resource object.
 */
693*4882a593Smuzhiyun 
vmw_user_shader_base_release(struct ttm_base_object ** p_base)694*4882a593Smuzhiyun static void vmw_user_shader_base_release(struct ttm_base_object **p_base)
695*4882a593Smuzhiyun {
696*4882a593Smuzhiyun 	struct ttm_base_object *base = *p_base;
697*4882a593Smuzhiyun 	struct vmw_resource *res = vmw_user_shader_base_to_res(base);
698*4882a593Smuzhiyun 
699*4882a593Smuzhiyun 	*p_base = NULL;
700*4882a593Smuzhiyun 	vmw_resource_unreference(&res);
701*4882a593Smuzhiyun }
702*4882a593Smuzhiyun 
vmw_shader_destroy_ioctl(struct drm_device * dev,void * data,struct drm_file * file_priv)703*4882a593Smuzhiyun int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
704*4882a593Smuzhiyun 			      struct drm_file *file_priv)
705*4882a593Smuzhiyun {
706*4882a593Smuzhiyun 	struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
707*4882a593Smuzhiyun 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
708*4882a593Smuzhiyun 
709*4882a593Smuzhiyun 	return ttm_ref_object_base_unref(tfile, arg->handle,
710*4882a593Smuzhiyun 					 TTM_REF_USAGE);
711*4882a593Smuzhiyun }
712*4882a593Smuzhiyun 
/**
 * vmw_user_shader_alloc - Create a user-space visible guest-backed shader
 * together with a ttm base object for it.
 *
 * @dev_priv: Pointer to the device private structure.
 * @buffer: Buffer object holding the shader bytecode.
 * @shader_size: Size of the shader bytecode in bytes.
 * @offset: Offset of the bytecode within @buffer.
 * @shader_type: The SVGA3d shader type.
 * @num_input_sig: Number of input signature entries.
 * @num_output_sig: Number of output signature entries.
 * @tfile: The ttm object file under which to register the base object.
 * @handle: If non-NULL, assigned the user-space handle of the new object.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
				 struct vmw_buffer_object *buffer,
				 size_t shader_size,
				 size_t offset,
				 SVGA3dShaderType shader_type,
				 uint8_t num_input_sig,
				 uint8_t num_output_sig,
				 struct ttm_object_file *tfile,
				 u32 *handle)
{
	struct vmw_user_shader *ushader;
	struct vmw_resource *res, *tmp;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	int ret;

	/* Lazily compute the per-object TTM accounting size. */
	if (unlikely(vmw_user_shader_size == 0))
		vmw_user_shader_size =
			ttm_round_pot(sizeof(struct vmw_user_shader)) +
			VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_shader_size,
				   &ctx);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for shader "
				  "creation.\n");
		goto out;
	}

	ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
	if (unlikely(!ushader)) {
		/* Undo the accounting charge taken above. */
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_shader_size);
		ret = -ENOMEM;
		goto out;
	}

	res = &ushader->shader.res;
	ushader->base.shareable = false;
	ushader->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
				 offset, shader_type, num_input_sig,
				 num_output_sig, buffer,
				 vmw_user_shader_free);
	if (unlikely(ret != 0))
		goto out;

	/* The base object gets its own reference on the resource. */
	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &ushader->base, false,
				   VMW_RES_SHADER,
				   &vmw_user_shader_base_release, NULL);

	if (unlikely(ret != 0)) {
		/* Drop the reference taken for the base object. */
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	if (handle)
		*handle = ushader->base.handle;
out_err:
	/* Drop the local reference; the base object keeps its own. */
	vmw_resource_unreference(&res);
out:
	return ret;
}
786*4882a593Smuzhiyun 
787*4882a593Smuzhiyun 
/**
 * vmw_shader_alloc - Create a (non user-visible) guest-backed shader
 * resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @buffer: Buffer object holding the shader bytecode.
 * @shader_size: Size of the shader bytecode in bytes.
 * @offset: Offset of the bytecode within @buffer.
 * @shader_type: The SVGA3d shader type.
 *
 * Returns a refcounted pointer to the new resource on success, an
 * ERR_PTR() encoded error otherwise.
 */
static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
					     struct vmw_buffer_object *buffer,
					     size_t shader_size,
					     size_t offset,
					     SVGA3dShaderType shader_type)
{
	struct vmw_shader *shader;
	struct vmw_resource *res;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	int ret;

	/* Lazily compute the per-object TTM accounting size. */
	if (unlikely(vmw_shader_size == 0))
		vmw_shader_size =
			ttm_round_pot(sizeof(struct vmw_shader)) +
			VMW_IDA_ACC_SIZE;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_shader_size,
				   &ctx);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for shader "
				  "creation.\n");
		goto out_err;
	}

	shader = kzalloc(sizeof(*shader), GFP_KERNEL);
	if (unlikely(!shader)) {
		/* Undo the accounting charge taken above. */
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_shader_size);
		ret = -ENOMEM;
		goto out_err;
	}

	res = &shader->res;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */
	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
				 offset, shader_type, 0, 0, buffer,
				 vmw_shader_free);

	/* res is only read when ret == 0, i.e. after it was assigned. */
out_err:
	return ret ? ERR_PTR(ret) : res;
}
837*4882a593Smuzhiyun 
838*4882a593Smuzhiyun 
/**
 * vmw_shader_define - Validate arguments and create a user-space shader.
 *
 * @dev: The drm device.
 * @file_priv: The calling drm file.
 * @shader_type_drm: drm shader type (only vs and ps are accepted).
 * @buffer_handle: Handle of the bytecode buffer, or SVGA3D_INVALID_ID
 * for no buffer.
 * @size: Size of the shader bytecode in bytes.
 * @offset: Offset of the bytecode within the buffer.
 * @num_input_sig: Number of input signature entries.
 * @num_output_sig: Number of output signature entries.
 * @shader_handle: Assigned the user-space handle of the new shader.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
			     enum drm_vmw_shader_type shader_type_drm,
			     u32 buffer_handle, size_t size, size_t offset,
			     uint8_t num_input_sig, uint8_t num_output_sig,
			     uint32_t *shader_handle)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_buffer_object *buffer = NULL;
	SVGA3dShaderType shader_type;
	int ret;

	if (buffer_handle != SVGA3D_INVALID_ID) {
		ret = vmw_user_bo_lookup(tfile, buffer_handle,
					     &buffer, NULL);
		if (unlikely(ret != 0)) {
			VMW_DEBUG_USER("Couldn't find buffer for shader creation.\n");
			return ret;
		}

		/* u64 arithmetic avoids overflow of size + offset. */
		if ((u64)buffer->base.num_pages * PAGE_SIZE <
		    (u64)size + (u64)offset) {
			VMW_DEBUG_USER("Illegal buffer- or shader size.\n");
			ret = -EINVAL;
			goto out_bad_arg;
		}
	}

	/* Translate the drm shader type to the SVGA3d one. */
	switch (shader_type_drm) {
	case drm_vmw_shader_type_vs:
		shader_type = SVGA3D_SHADERTYPE_VS;
		break;
	case drm_vmw_shader_type_ps:
		shader_type = SVGA3D_SHADERTYPE_PS;
		break;
	default:
		VMW_DEBUG_USER("Illegal shader type.\n");
		ret = -EINVAL;
		goto out_bad_arg;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out_bad_arg;

	ret = vmw_user_shader_alloc(dev_priv, buffer, size, offset,
				    shader_type, num_input_sig,
				    num_output_sig, tfile, shader_handle);

	ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg:
	/*
	 * buffer may still be NULL here (no-buffer path); presumably
	 * vmw_bo_unreference() tolerates that — confirm against its
	 * definition.
	 */
	vmw_bo_unreference(&buffer);
	return ret;
}
893*4882a593Smuzhiyun 
/**
 * vmw_shader_id_ok - Check whether a compat shader user key and
 * shader type are within valid bounds.
 *
 * @user_key: User space id of the shader.
 * @shader_type: Shader type.
 *
 * Returns true if valid, false if not.
 */
static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
{
	/* user_key must fit in 20 bits and shader_type in 4 bits. */
	return (user_key >> 20) == 0 && (unsigned) shader_type < 16;
}
907*4882a593Smuzhiyun 
908*4882a593Smuzhiyun /**
909*4882a593Smuzhiyun  * vmw_shader_key - Compute a hash key suitable for a compat shader.
910*4882a593Smuzhiyun  *
911*4882a593Smuzhiyun  * @user_key: User space id of the shader.
912*4882a593Smuzhiyun  * @shader_type: Shader type.
913*4882a593Smuzhiyun  *
914*4882a593Smuzhiyun  * Returns a hash key suitable for a command buffer managed resource
915*4882a593Smuzhiyun  * manager hash table.
916*4882a593Smuzhiyun  */
static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type)
{
	/* Pack the 4-bit shader type above the 20-bit user key. */
	return user_key | ((u32)shader_type << 20);
}
921*4882a593Smuzhiyun 
922*4882a593Smuzhiyun /**
923*4882a593Smuzhiyun  * vmw_shader_remove - Stage a compat shader for removal.
924*4882a593Smuzhiyun  *
925*4882a593Smuzhiyun  * @man: Pointer to the compat shader manager identifying the shader namespace.
926*4882a593Smuzhiyun  * @user_key: The key that is used to identify the shader. The key is
927*4882a593Smuzhiyun  * unique to the shader type.
928*4882a593Smuzhiyun  * @shader_type: Shader type.
929*4882a593Smuzhiyun  * @list: Caller's list of staged command buffer resource actions.
930*4882a593Smuzhiyun  */
int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
		      u32 user_key, SVGA3dShaderType shader_type,
		      struct list_head *list)
{
	/* Receives the removed resource; this caller discards it. */
	struct vmw_resource *unused;

	if (!vmw_shader_id_ok(user_key, shader_type))
		return -EINVAL;

	return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_shader,
				     vmw_shader_key(user_key, shader_type),
				     list, &unused);
}
944*4882a593Smuzhiyun 
/**
 * vmw_compat_shader_add - Create a compat shader and stage it for addition
 * as a command buffer managed resource.
 *
 * @dev_priv: Pointer to device private structure.
 * @man: Pointer to the compat shader manager identifying the shader namespace.
 * @user_key: The key that is used to identify the shader. The key is
 * unique to the shader type.
 * @bytecode: Pointer to the bytecode of the shader.
 * @shader_type: Shader type.
 * @size: Size of the shader bytecode in bytes.
 * @list: Caller's list of staged command buffer resource actions.
 *
 */
/**
 * vmw_compat_shader_add - Create a compat shader and stage it for addition
 * as a command buffer managed resource.
 *
 * @dev_priv: Pointer to device private structure.
 * @man: Pointer to the compat shader manager identifying the shader namespace.
 * @user_key: The key that is used to identify the shader. The key is
 * unique to the shader type.
 * @bytecode: Pointer to the bytecode of the shader.
 * @shader_type: Shader type.
 * @size: Size of the shader bytecode in bytes.
 * @list: Caller's list of staged command buffer resource actions.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int vmw_compat_shader_add(struct vmw_private *dev_priv,
			  struct vmw_cmdbuf_res_manager *man,
			  u32 user_key, const void *bytecode,
			  SVGA3dShaderType shader_type,
			  size_t size,
			  struct list_head *list)
{
	struct ttm_operation_ctx ctx = { false, true };
	struct vmw_buffer_object *buf;
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	int ret;
	struct vmw_resource *res;

	if (!vmw_shader_id_ok(user_key, shader_type))
		return -EINVAL;

	/* Allocate and pin a DMA buffer */
	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (unlikely(!buf))
		return -ENOMEM;

	ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_ne_placement,
			      true, vmw_bo_bo_free);
	if (unlikely(ret != 0))
		goto out;

	ret = ttm_bo_reserve(&buf->base, false, true, NULL);
	if (unlikely(ret != 0))
		goto no_reserve;

	/* Map and copy shader bytecode. */
	ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
			  &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(&buf->base);
		goto no_reserve;
	}

	memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
	WARN_ON(is_iomem);

	ttm_bo_kunmap(&map);
	/* Unpin: move the buffer back to a non-pinned system placement. */
	ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, &ctx);
	WARN_ON(ret != 0);
	ttm_bo_unreserve(&buf->base);

	res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
	/*
	 * vmw_shader_alloc() reports failure through an ERR_PTR return,
	 * not through ret; the previous check of ret here let an error
	 * pointer flow into vmw_cmdbuf_res_add()/vmw_resource_unreference().
	 */
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		goto no_reserve;
	}

	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader,
				 vmw_shader_key(user_key, shader_type),
				 res, list);
	/* Drop the local reference; the staged entry holds its own. */
	vmw_resource_unreference(&res);
no_reserve:
	vmw_bo_unreference(&buf);
out:
	return ret;
}
1019*4882a593Smuzhiyun 
1020*4882a593Smuzhiyun /**
1021*4882a593Smuzhiyun  * vmw_shader_lookup - Look up a compat shader
1022*4882a593Smuzhiyun  *
1023*4882a593Smuzhiyun  * @man: Pointer to the command buffer managed resource manager identifying
1024*4882a593Smuzhiyun  * the shader namespace.
1025*4882a593Smuzhiyun  * @user_key: The user space id of the shader.
1026*4882a593Smuzhiyun  * @shader_type: The shader type.
1027*4882a593Smuzhiyun  *
1028*4882a593Smuzhiyun  * Returns a refcounted pointer to a struct vmw_resource if the shader was
1029*4882a593Smuzhiyun  * found. An error pointer otherwise.
1030*4882a593Smuzhiyun  */
1031*4882a593Smuzhiyun struct vmw_resource *
vmw_shader_lookup(struct vmw_cmdbuf_res_manager * man,u32 user_key,SVGA3dShaderType shader_type)1032*4882a593Smuzhiyun vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
1033*4882a593Smuzhiyun 		  u32 user_key,
1034*4882a593Smuzhiyun 		  SVGA3dShaderType shader_type)
1035*4882a593Smuzhiyun {
1036*4882a593Smuzhiyun 	if (!vmw_shader_id_ok(user_key, shader_type))
1037*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
1038*4882a593Smuzhiyun 
1039*4882a593Smuzhiyun 	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_shader,
1040*4882a593Smuzhiyun 				     vmw_shader_key(user_key, shader_type));
1041*4882a593Smuzhiyun }
1042*4882a593Smuzhiyun 
vmw_shader_define_ioctl(struct drm_device * dev,void * data,struct drm_file * file_priv)1043*4882a593Smuzhiyun int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
1044*4882a593Smuzhiyun 			     struct drm_file *file_priv)
1045*4882a593Smuzhiyun {
1046*4882a593Smuzhiyun 	struct drm_vmw_shader_create_arg *arg =
1047*4882a593Smuzhiyun 		(struct drm_vmw_shader_create_arg *)data;
1048*4882a593Smuzhiyun 
1049*4882a593Smuzhiyun 	return vmw_shader_define(dev, file_priv, arg->shader_type,
1050*4882a593Smuzhiyun 				 arg->buffer_handle,
1051*4882a593Smuzhiyun 				 arg->size, arg->offset,
1052*4882a593Smuzhiyun 				 0, 0,
1053*4882a593Smuzhiyun 				 &arg->shader_handle);
1054*4882a593Smuzhiyun }
1055