1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0 OR MIT
2*4882a593Smuzhiyun /**************************************************************************
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * Permission is hereby granted, free of charge, to any person obtaining a
7*4882a593Smuzhiyun * copy of this software and associated documentation files (the
8*4882a593Smuzhiyun * "Software"), to deal in the Software without restriction, including
9*4882a593Smuzhiyun * without limitation the rights to use, copy, modify, merge, publish,
10*4882a593Smuzhiyun * distribute, sub license, and/or sell copies of the Software, and to
11*4882a593Smuzhiyun * permit persons to whom the Software is furnished to do so, subject to
12*4882a593Smuzhiyun * the following conditions:
13*4882a593Smuzhiyun *
14*4882a593Smuzhiyun * The above copyright notice and this permission notice (including the
15*4882a593Smuzhiyun * next paragraph) shall be included in all copies or substantial portions
16*4882a593Smuzhiyun * of the Software.
17*4882a593Smuzhiyun *
18*4882a593Smuzhiyun * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19*4882a593Smuzhiyun * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20*4882a593Smuzhiyun * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21*4882a593Smuzhiyun * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22*4882a593Smuzhiyun * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23*4882a593Smuzhiyun * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24*4882a593Smuzhiyun * USE OR OTHER DEALINGS IN THE SOFTWARE.
25*4882a593Smuzhiyun *
26*4882a593Smuzhiyun **************************************************************************/
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun #include <drm/ttm/ttm_placement.h>
29*4882a593Smuzhiyun
30*4882a593Smuzhiyun #include "vmwgfx_drv.h"
31*4882a593Smuzhiyun #include "vmwgfx_resource_priv.h"
32*4882a593Smuzhiyun #include "vmwgfx_binding.h"
33*4882a593Smuzhiyun
/**
 * struct vmw_user_context - User-space visible context resource.
 *
 * @base: TTM base object, used to look up the embedded resource from a
 * user-space handle (see vmw_user_context_base_to_res()).
 * @res: The embedded context resource.
 * @cbs: Context binding state tracker.
 * @man: Command buffer resource manager. Only created on mob-capable
 * devices (see vmw_gb_context_init()).
 * @cotables: DX cotable resources. Entries are read and cleared under
 * @cotable_lock.
 * @cotable_lock: Protects access to the @cotables array.
 * @dx_query_mob: Buffer object carrying DX query state for this context,
 * or NULL.
 */
struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
	struct vmw_ctx_binding_state *cbs;
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_resource *cotables[SVGA_COTABLE_MAX];
	spinlock_t cotable_lock;
	struct vmw_buffer_object *dx_query_mob;
};
43*4882a593Smuzhiyun
static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

/* Resource-func callbacks for guest-backed contexts. */
static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
/* Resource-func callbacks for DX contexts. */
static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);

/*
 * Accounted allocation size of a user context, returned to the global
 * memory accounting in vmw_user_context_free(). Presumably initialized
 * elsewhere in this file - not visible in this chunk; TODO confirm.
 */
static uint64_t vmw_user_context_size;

/* Converter from a ttm base object handle to the embedded context resource. */
static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;
73*4882a593Smuzhiyun
74*4882a593Smuzhiyun
/*
 * Legacy (non guest-backed) contexts carry no backup buffer and are
 * defined/destroyed directly via fifo commands in vmw_context_init() and
 * vmw_hw_context_destroy(), so all res_func callbacks are NULL.
 */
static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};
86*4882a593Smuzhiyun
/*
 * Guest-backed contexts: evictable, backed by a mob placement, with
 * create/bind/unbind/destroy implemented via fifo commands below.
 */
static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "guest backed contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_context_create,
	.destroy = vmw_gb_context_destroy,
	.bind = vmw_gb_context_bind,
	.unbind = vmw_gb_context_unbind
};
100*4882a593Smuzhiyun
/*
 * DX contexts: like guest-backed contexts but with the DX command set
 * and an SVGADXContextMobFormat-sized backup mob (see vmw_gb_context_init()).
 */
static const struct vmw_res_func vmw_dx_context_func = {
	.res_type = vmw_res_dx_context,
	.needs_backup = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "dx contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_dx_context_create,
	.destroy = vmw_dx_context_destroy,
	.bind = vmw_dx_context_bind,
	.unbind = vmw_dx_context_unbind
};
114*4882a593Smuzhiyun
/*
 * Context management:
 */
118*4882a593Smuzhiyun
vmw_context_cotables_unref(struct vmw_private * dev_priv,struct vmw_user_context * uctx)119*4882a593Smuzhiyun static void vmw_context_cotables_unref(struct vmw_private *dev_priv,
120*4882a593Smuzhiyun struct vmw_user_context *uctx)
121*4882a593Smuzhiyun {
122*4882a593Smuzhiyun struct vmw_resource *res;
123*4882a593Smuzhiyun int i;
124*4882a593Smuzhiyun u32 cotable_max = has_sm5_context(dev_priv) ?
125*4882a593Smuzhiyun SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
126*4882a593Smuzhiyun
127*4882a593Smuzhiyun for (i = 0; i < cotable_max; ++i) {
128*4882a593Smuzhiyun spin_lock(&uctx->cotable_lock);
129*4882a593Smuzhiyun res = uctx->cotables[i];
130*4882a593Smuzhiyun uctx->cotables[i] = NULL;
131*4882a593Smuzhiyun spin_unlock(&uctx->cotable_lock);
132*4882a593Smuzhiyun
133*4882a593Smuzhiyun if (res)
134*4882a593Smuzhiyun vmw_resource_unreference(&res);
135*4882a593Smuzhiyun }
136*4882a593Smuzhiyun }
137*4882a593Smuzhiyun
/**
 * vmw_hw_context_destroy - Destroy a context on the device.
 *
 * @res: The context resource.
 *
 * For guest-backed and DX contexts this tears down the command-buffer
 * resource manager, kills all bindings and invokes the res_func destroy
 * callback under the cmdbuf and binding mutexes, then drops the cotable
 * references. For legacy contexts a SVGA_3D_CMD_CONTEXT_DESTROY command
 * is written to the fifo instead.
 */
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;


	/* Identify guest-backed / DX contexts by their destroy callback. */
	if (res->func->destroy == vmw_gb_context_destroy ||
	    res->func->destroy == vmw_dx_context_destroy) {
		mutex_lock(&dev_priv->cmdbuf_mutex);
		vmw_cmdbuf_res_man_destroy(uctx->man);
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_state_kill(uctx->cbs);
		(void) res->func->destroy(res);
		mutex_unlock(&dev_priv->binding_mutex);
		/*
		 * If a query bo is still pinned but no valid query context id
		 * remains, release the pinned bo now.
		 */
		if (dev_priv->pinned_bo != NULL &&
		    !dev_priv->query_cid_valid)
			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		vmw_context_cotables_unref(dev_priv, uctx);
		return;
	}

	/* Legacy path: emit a context-destroy command directly. */
	vmw_execbuf_release_pinned_bo(dev_priv);
	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return;

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_dec(dev_priv);
}
177*4882a593Smuzhiyun
/**
 * vmw_gb_context_init - Initialize a guest-backed or DX context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @dx: Whether to set the context up as a DX context rather than a plain
 * guest-backed context.
 * @res: The context resource to initialize.
 * @res_free: Destructor for @res, or NULL to use kfree().
 *
 * Sets the backup (mob) size, initializes the resource and allocates the
 * command-buffer resource manager, the binding-state tracker and - for DX
 * contexts - the cotable resources. On error the resource is freed with
 * @res_free (or kfree() when @res_free is NULL).
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int vmw_gb_context_init(struct vmw_private *dev_priv,
			       bool dx,
			       struct vmw_resource *res,
			       void (*res_free)(struct vmw_resource *res))
{
	int ret, i;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	/* DX contexts use a different, larger mob format. */
	res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
			    SVGA3D_CONTEXT_DATA_SIZE);
	ret = vmw_resource_init(dev_priv, res, true,
				res_free,
				dx ? &vmw_dx_context_func :
				&vmw_gb_context_func);
	if (unlikely(ret != 0))
		goto out_err;

	if (dev_priv->has_mob) {
		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
		if (IS_ERR(uctx->man)) {
			ret = PTR_ERR(uctx->man);
			/* Clear so the destructor won't touch an ERR_PTR. */
			uctx->man = NULL;
			goto out_err;
		}
	}

	uctx->cbs = vmw_binding_state_alloc(dev_priv);
	if (IS_ERR(uctx->cbs)) {
		ret = PTR_ERR(uctx->cbs);
		goto out_err;
	}

	spin_lock_init(&uctx->cotable_lock);

	if (dx) {
		/* SM5 devices expose more cotable types than DX10-level ones. */
		u32 cotable_max = has_sm5_context(dev_priv) ?
			SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
		for (i = 0; i < cotable_max; ++i) {
			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
							      &uctx->res, i);
			if (IS_ERR(uctx->cotables[i])) {
				ret = PTR_ERR(uctx->cotables[i]);
				/*
				 * NULL the slot so that
				 * vmw_context_cotables_unref() doesn't pass
				 * the ERR_PTR to vmw_resource_unreference().
				 */
				uctx->cotables[i] = NULL;
				goto out_cotables;
			}
		}
	}

	res->hw_destroy = vmw_hw_context_destroy;
	return 0;

out_cotables:
	vmw_context_cotables_unref(dev_priv, uctx);
out_err:
	if (res_free)
		res_free(res);
	else
		kfree(res);
	return ret;
}
238*4882a593Smuzhiyun
/**
 * vmw_context_init - Initialize a context resource, legacy or guest-backed.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The context resource to initialize.
 * @res_free: Destructor for @res, or NULL to use kfree().
 * @dx: Whether to create a DX context; only honored on mob-capable devices.
 *
 * On mob-capable devices this defers to vmw_gb_context_init(). Otherwise a
 * legacy context is set up and a SVGA_3D_CMD_CONTEXT_DEFINE command is
 * written to the fifo.
 */
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free)(struct vmw_resource *res),
			    bool dx)
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	if (dev_priv->has_mob)
		return vmw_gb_context_init(dev_priv, dx, res, res_free);

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	/* Legacy devices only support a limited range of context ids. */
	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		/* Resource is initialized; drop the reference to free it. */
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);
	res->hw_destroy = vmw_hw_context_destroy;
	return 0;

out_early:
	/* Resource init failed: free the object directly. */
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}
290*4882a593Smuzhiyun
291*4882a593Smuzhiyun
292*4882a593Smuzhiyun /*
293*4882a593Smuzhiyun * GB context.
294*4882a593Smuzhiyun */
295*4882a593Smuzhiyun
vmw_gb_context_create(struct vmw_resource * res)296*4882a593Smuzhiyun static int vmw_gb_context_create(struct vmw_resource *res)
297*4882a593Smuzhiyun {
298*4882a593Smuzhiyun struct vmw_private *dev_priv = res->dev_priv;
299*4882a593Smuzhiyun int ret;
300*4882a593Smuzhiyun struct {
301*4882a593Smuzhiyun SVGA3dCmdHeader header;
302*4882a593Smuzhiyun SVGA3dCmdDefineGBContext body;
303*4882a593Smuzhiyun } *cmd;
304*4882a593Smuzhiyun
305*4882a593Smuzhiyun if (likely(res->id != -1))
306*4882a593Smuzhiyun return 0;
307*4882a593Smuzhiyun
308*4882a593Smuzhiyun ret = vmw_resource_alloc_id(res);
309*4882a593Smuzhiyun if (unlikely(ret != 0)) {
310*4882a593Smuzhiyun DRM_ERROR("Failed to allocate a context id.\n");
311*4882a593Smuzhiyun goto out_no_id;
312*4882a593Smuzhiyun }
313*4882a593Smuzhiyun
314*4882a593Smuzhiyun if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
315*4882a593Smuzhiyun ret = -EBUSY;
316*4882a593Smuzhiyun goto out_no_fifo;
317*4882a593Smuzhiyun }
318*4882a593Smuzhiyun
319*4882a593Smuzhiyun cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
320*4882a593Smuzhiyun if (unlikely(cmd == NULL)) {
321*4882a593Smuzhiyun ret = -ENOMEM;
322*4882a593Smuzhiyun goto out_no_fifo;
323*4882a593Smuzhiyun }
324*4882a593Smuzhiyun
325*4882a593Smuzhiyun cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
326*4882a593Smuzhiyun cmd->header.size = sizeof(cmd->body);
327*4882a593Smuzhiyun cmd->body.cid = res->id;
328*4882a593Smuzhiyun vmw_fifo_commit(dev_priv, sizeof(*cmd));
329*4882a593Smuzhiyun vmw_fifo_resource_inc(dev_priv);
330*4882a593Smuzhiyun
331*4882a593Smuzhiyun return 0;
332*4882a593Smuzhiyun
333*4882a593Smuzhiyun out_no_fifo:
334*4882a593Smuzhiyun vmw_resource_release_id(res);
335*4882a593Smuzhiyun out_no_id:
336*4882a593Smuzhiyun return ret;
337*4882a593Smuzhiyun }
338*4882a593Smuzhiyun
vmw_gb_context_bind(struct vmw_resource * res,struct ttm_validate_buffer * val_buf)339*4882a593Smuzhiyun static int vmw_gb_context_bind(struct vmw_resource *res,
340*4882a593Smuzhiyun struct ttm_validate_buffer *val_buf)
341*4882a593Smuzhiyun {
342*4882a593Smuzhiyun struct vmw_private *dev_priv = res->dev_priv;
343*4882a593Smuzhiyun struct {
344*4882a593Smuzhiyun SVGA3dCmdHeader header;
345*4882a593Smuzhiyun SVGA3dCmdBindGBContext body;
346*4882a593Smuzhiyun } *cmd;
347*4882a593Smuzhiyun struct ttm_buffer_object *bo = val_buf->bo;
348*4882a593Smuzhiyun
349*4882a593Smuzhiyun BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
350*4882a593Smuzhiyun
351*4882a593Smuzhiyun cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
352*4882a593Smuzhiyun if (unlikely(cmd == NULL))
353*4882a593Smuzhiyun return -ENOMEM;
354*4882a593Smuzhiyun
355*4882a593Smuzhiyun cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
356*4882a593Smuzhiyun cmd->header.size = sizeof(cmd->body);
357*4882a593Smuzhiyun cmd->body.cid = res->id;
358*4882a593Smuzhiyun cmd->body.mobid = bo->mem.start;
359*4882a593Smuzhiyun cmd->body.validContents = res->backup_dirty;
360*4882a593Smuzhiyun res->backup_dirty = false;
361*4882a593Smuzhiyun vmw_fifo_commit(dev_priv, sizeof(*cmd));
362*4882a593Smuzhiyun
363*4882a593Smuzhiyun return 0;
364*4882a593Smuzhiyun }
365*4882a593Smuzhiyun
/**
 * vmw_gb_context_unbind - Unbind a guest-backed context from its backup mob.
 *
 * @res: The context resource.
 * @readback: Whether to read the context state back into the backup buffer
 * before unbinding.
 * @val_buf: Validation buffer info for the backup buffer.
 *
 * Scrubs all bindings, optionally emits a readback command, detaches the
 * mob and fences the backup buffer so it is not reused before the device
 * has finished with it.
 */
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;


	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_binding_state_scrub(uctx->cbs);

	/* The readback command is emitted only when requested. */
	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		/* The bind command follows immediately after the readback. */
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	/* Binding to SVGA3D_INVALID_ID detaches the context from its mob. */
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
431*4882a593Smuzhiyun
vmw_gb_context_destroy(struct vmw_resource * res)432*4882a593Smuzhiyun static int vmw_gb_context_destroy(struct vmw_resource *res)
433*4882a593Smuzhiyun {
434*4882a593Smuzhiyun struct vmw_private *dev_priv = res->dev_priv;
435*4882a593Smuzhiyun struct {
436*4882a593Smuzhiyun SVGA3dCmdHeader header;
437*4882a593Smuzhiyun SVGA3dCmdDestroyGBContext body;
438*4882a593Smuzhiyun } *cmd;
439*4882a593Smuzhiyun
440*4882a593Smuzhiyun if (likely(res->id == -1))
441*4882a593Smuzhiyun return 0;
442*4882a593Smuzhiyun
443*4882a593Smuzhiyun cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
444*4882a593Smuzhiyun if (unlikely(cmd == NULL))
445*4882a593Smuzhiyun return -ENOMEM;
446*4882a593Smuzhiyun
447*4882a593Smuzhiyun cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
448*4882a593Smuzhiyun cmd->header.size = sizeof(cmd->body);
449*4882a593Smuzhiyun cmd->body.cid = res->id;
450*4882a593Smuzhiyun vmw_fifo_commit(dev_priv, sizeof(*cmd));
451*4882a593Smuzhiyun if (dev_priv->query_cid == res->id)
452*4882a593Smuzhiyun dev_priv->query_cid_valid = false;
453*4882a593Smuzhiyun vmw_resource_release_id(res);
454*4882a593Smuzhiyun vmw_fifo_resource_dec(dev_priv);
455*4882a593Smuzhiyun
456*4882a593Smuzhiyun return 0;
457*4882a593Smuzhiyun }
458*4882a593Smuzhiyun
459*4882a593Smuzhiyun /*
460*4882a593Smuzhiyun * DX context.
461*4882a593Smuzhiyun */
462*4882a593Smuzhiyun
vmw_dx_context_create(struct vmw_resource * res)463*4882a593Smuzhiyun static int vmw_dx_context_create(struct vmw_resource *res)
464*4882a593Smuzhiyun {
465*4882a593Smuzhiyun struct vmw_private *dev_priv = res->dev_priv;
466*4882a593Smuzhiyun int ret;
467*4882a593Smuzhiyun struct {
468*4882a593Smuzhiyun SVGA3dCmdHeader header;
469*4882a593Smuzhiyun SVGA3dCmdDXDefineContext body;
470*4882a593Smuzhiyun } *cmd;
471*4882a593Smuzhiyun
472*4882a593Smuzhiyun if (likely(res->id != -1))
473*4882a593Smuzhiyun return 0;
474*4882a593Smuzhiyun
475*4882a593Smuzhiyun ret = vmw_resource_alloc_id(res);
476*4882a593Smuzhiyun if (unlikely(ret != 0)) {
477*4882a593Smuzhiyun DRM_ERROR("Failed to allocate a context id.\n");
478*4882a593Smuzhiyun goto out_no_id;
479*4882a593Smuzhiyun }
480*4882a593Smuzhiyun
481*4882a593Smuzhiyun if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
482*4882a593Smuzhiyun ret = -EBUSY;
483*4882a593Smuzhiyun goto out_no_fifo;
484*4882a593Smuzhiyun }
485*4882a593Smuzhiyun
486*4882a593Smuzhiyun cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
487*4882a593Smuzhiyun if (unlikely(cmd == NULL)) {
488*4882a593Smuzhiyun ret = -ENOMEM;
489*4882a593Smuzhiyun goto out_no_fifo;
490*4882a593Smuzhiyun }
491*4882a593Smuzhiyun
492*4882a593Smuzhiyun cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
493*4882a593Smuzhiyun cmd->header.size = sizeof(cmd->body);
494*4882a593Smuzhiyun cmd->body.cid = res->id;
495*4882a593Smuzhiyun vmw_fifo_commit(dev_priv, sizeof(*cmd));
496*4882a593Smuzhiyun vmw_fifo_resource_inc(dev_priv);
497*4882a593Smuzhiyun
498*4882a593Smuzhiyun return 0;
499*4882a593Smuzhiyun
500*4882a593Smuzhiyun out_no_fifo:
501*4882a593Smuzhiyun vmw_resource_release_id(res);
502*4882a593Smuzhiyun out_no_id:
503*4882a593Smuzhiyun return ret;
504*4882a593Smuzhiyun }
505*4882a593Smuzhiyun
vmw_dx_context_bind(struct vmw_resource * res,struct ttm_validate_buffer * val_buf)506*4882a593Smuzhiyun static int vmw_dx_context_bind(struct vmw_resource *res,
507*4882a593Smuzhiyun struct ttm_validate_buffer *val_buf)
508*4882a593Smuzhiyun {
509*4882a593Smuzhiyun struct vmw_private *dev_priv = res->dev_priv;
510*4882a593Smuzhiyun struct {
511*4882a593Smuzhiyun SVGA3dCmdHeader header;
512*4882a593Smuzhiyun SVGA3dCmdDXBindContext body;
513*4882a593Smuzhiyun } *cmd;
514*4882a593Smuzhiyun struct ttm_buffer_object *bo = val_buf->bo;
515*4882a593Smuzhiyun
516*4882a593Smuzhiyun BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
517*4882a593Smuzhiyun
518*4882a593Smuzhiyun cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
519*4882a593Smuzhiyun if (unlikely(cmd == NULL))
520*4882a593Smuzhiyun return -ENOMEM;
521*4882a593Smuzhiyun
522*4882a593Smuzhiyun cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
523*4882a593Smuzhiyun cmd->header.size = sizeof(cmd->body);
524*4882a593Smuzhiyun cmd->body.cid = res->id;
525*4882a593Smuzhiyun cmd->body.mobid = bo->mem.start;
526*4882a593Smuzhiyun cmd->body.validContents = res->backup_dirty;
527*4882a593Smuzhiyun res->backup_dirty = false;
528*4882a593Smuzhiyun vmw_fifo_commit(dev_priv, sizeof(*cmd));
529*4882a593Smuzhiyun
530*4882a593Smuzhiyun
531*4882a593Smuzhiyun return 0;
532*4882a593Smuzhiyun }
533*4882a593Smuzhiyun
/**
 * vmw_dx_context_scrub_cotables - Scrub all bindings and cotables from a
 * context
 *
 * @ctx: Pointer to the context resource
 * @readback: Whether to save the otable contents on scrubbing.
 *
 * COtables must be unbound before their context, but unbinding requires
 * the backup buffer being reserved, whereas scrubbing does not.
 * This function scrubs all cotables of a context, potentially reading back
 * the contents into their backup buffers. However, scrubbing cotables
 * also makes the device context invalid, so scrub all bindings first so
 * that doesn't have to be done later with an invalid context.
 */
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
				   bool readback)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	/* SM5 devices expose more cotable types than DX10-level devices. */
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
	int i;

	/* Bindings first: scrubbing cotables invalidates the device context. */
	vmw_binding_state_scrub(uctx->cbs);
	for (i = 0; i < cotable_max; ++i) {
		struct vmw_resource *res;

		/* Avoid racing with ongoing cotable destruction. */
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[vmw_cotable_scrub_order[i]];
		if (res)
			/* Skip cotables already on their way to destruction. */
			res = vmw_resource_reference_unless_doomed(res);
		spin_unlock(&uctx->cotable_lock);
		if (!res)
			continue;

		WARN_ON(vmw_cotable_scrub(res, readback));
		vmw_resource_unreference(&res);
	}
}
574*4882a593Smuzhiyun
/**
 * vmw_dx_context_unbind - Unbind a DX context from its backup mob.
 *
 * @res: The context resource.
 * @readback: Whether to read context and query state back into the backup
 * buffers before unbinding.
 * @val_buf: Validation buffer info for the backup buffer.
 *
 * Scrubs all bindings and cotables, optionally reads back query states
 * and context contents, detaches the mob and fences the backup buffer so
 * it is not reused before the device has finished with it.
 */
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;


	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_dx_context_scrub_cotables(res, readback);

	/* Read back pending query states before the context is detached. */
	if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
	    readback) {
		WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
		if (vmw_query_readback_all(uctx->dx_query_mob))
			DRM_ERROR("Failed to read back query states\n");
	}

	/* The readback command is emitted only when requested. */
	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		/* The bind command follows immediately after the readback. */
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	/* Binding to SVGA3D_INVALID_ID detaches the context from its mob. */
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
647*4882a593Smuzhiyun
/**
 * vmw_dx_context_destroy - Destroy a hardware DX context.
 * @res: The context resource.
 *
 * Emits an SVGA_3D_CMD_DX_DESTROY_CONTEXT command for the context's
 * hardware id, releases the id and decrements the fifo resource count.
 *
 * Return: 0 on success, -ENOMEM if fifo space could not be reserved.
 */
static int vmw_dx_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyContext body;
	} *cmd;

	/* No hardware id means there is nothing to destroy. */
	if (likely(res->id == -1))
		return 0;

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	/* Pending query results for this context are no longer meaningful. */
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}
674*4882a593Smuzhiyun
675*4882a593Smuzhiyun /**
676*4882a593Smuzhiyun * User-space context management:
677*4882a593Smuzhiyun */
678*4882a593Smuzhiyun
679*4882a593Smuzhiyun static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object * base)680*4882a593Smuzhiyun vmw_user_context_base_to_res(struct ttm_base_object *base)
681*4882a593Smuzhiyun {
682*4882a593Smuzhiyun return &(container_of(base, struct vmw_user_context, base)->res);
683*4882a593Smuzhiyun }
684*4882a593Smuzhiyun
/**
 * vmw_user_context_free - Resource destructor for user-space contexts.
 * @res: The context resource being destroyed.
 *
 * Frees the binding state, drops any DX query MOB association, frees the
 * embedding vmw_user_context through its ttm base object and returns the
 * accounted memory to the global TTM memory accounting.
 */
static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;

	if (ctx->cbs)
		vmw_binding_state_free(ctx->cbs);

	/* NULL mob argument detaches any bound DX query MOB. */
	(void) vmw_context_bind_dx_query(res, NULL);

	ttm_base_object_kfree(ctx, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_context_size);
}
700*4882a593Smuzhiyun
701*4882a593Smuzhiyun /**
702*4882a593Smuzhiyun * This function is called when user space has no more references on the
703*4882a593Smuzhiyun * base object. It releases the base-object's reference on the resource object.
704*4882a593Smuzhiyun */
705*4882a593Smuzhiyun
vmw_user_context_base_release(struct ttm_base_object ** p_base)706*4882a593Smuzhiyun static void vmw_user_context_base_release(struct ttm_base_object **p_base)
707*4882a593Smuzhiyun {
708*4882a593Smuzhiyun struct ttm_base_object *base = *p_base;
709*4882a593Smuzhiyun struct vmw_user_context *ctx =
710*4882a593Smuzhiyun container_of(base, struct vmw_user_context, base);
711*4882a593Smuzhiyun struct vmw_resource *res = &ctx->res;
712*4882a593Smuzhiyun
713*4882a593Smuzhiyun *p_base = NULL;
714*4882a593Smuzhiyun vmw_resource_unreference(&res);
715*4882a593Smuzhiyun }
716*4882a593Smuzhiyun
vmw_context_destroy_ioctl(struct drm_device * dev,void * data,struct drm_file * file_priv)717*4882a593Smuzhiyun int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
718*4882a593Smuzhiyun struct drm_file *file_priv)
719*4882a593Smuzhiyun {
720*4882a593Smuzhiyun struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
721*4882a593Smuzhiyun struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
722*4882a593Smuzhiyun
723*4882a593Smuzhiyun return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
724*4882a593Smuzhiyun }
725*4882a593Smuzhiyun
/**
 * vmw_context_define - Create a new context resource and a user-space
 * handle for it.
 * @dev: The drm device.
 * @data: Pointer to a struct drm_vmw_context_arg; the created handle is
 * returned in @data->cid.
 * @file_priv: Identifies the calling file.
 * @dx: Whether to create a DX context rather than a legacy one.
 *
 * Accounts the object size, allocates and initializes the context
 * resource, and publishes it as a ttm base object so that user-space can
 * reference it by handle.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int vmw_context_define(struct drm_device *dev, void *data,
			      struct drm_file *file_priv, bool dx)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_operation_ctx ttm_opt_ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	int ret;

	if (!has_sm4_context(dev_priv) && dx) {
		VMW_DEBUG_USER("DX contexts not supported by device.\n");
		return -EINVAL;
	}

	/* Lazily compute the accounted size of one user context object. */
	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) +
		  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
		  VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   &ttm_opt_ctx);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for context"
				  " creation.\n");
		goto out_unlock;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(!ctx)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
	if (unlikely(ret != 0))
		goto out_unlock;

	/* The base object holds its own reference on the resource. */
	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.handle;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
801*4882a593Smuzhiyun
/* Ioctl entry point: create a legacy (non-DX) context. */
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	return vmw_context_define(dev, data, file_priv, false);
}
807*4882a593Smuzhiyun
vmw_extended_context_define_ioctl(struct drm_device * dev,void * data,struct drm_file * file_priv)808*4882a593Smuzhiyun int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
809*4882a593Smuzhiyun struct drm_file *file_priv)
810*4882a593Smuzhiyun {
811*4882a593Smuzhiyun union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
812*4882a593Smuzhiyun struct drm_vmw_context_arg *rep = &arg->rep;
813*4882a593Smuzhiyun
814*4882a593Smuzhiyun switch (arg->req) {
815*4882a593Smuzhiyun case drm_vmw_context_legacy:
816*4882a593Smuzhiyun return vmw_context_define(dev, rep, file_priv, false);
817*4882a593Smuzhiyun case drm_vmw_context_dx:
818*4882a593Smuzhiyun return vmw_context_define(dev, rep, file_priv, true);
819*4882a593Smuzhiyun default:
820*4882a593Smuzhiyun break;
821*4882a593Smuzhiyun }
822*4882a593Smuzhiyun return -EINVAL;
823*4882a593Smuzhiyun }
824*4882a593Smuzhiyun
825*4882a593Smuzhiyun /**
826*4882a593Smuzhiyun * vmw_context_binding_list - Return a list of context bindings
827*4882a593Smuzhiyun *
828*4882a593Smuzhiyun * @ctx: The context resource
829*4882a593Smuzhiyun *
830*4882a593Smuzhiyun * Returns the current list of bindings of the given context. Note that
831*4882a593Smuzhiyun * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
832*4882a593Smuzhiyun */
vmw_context_binding_list(struct vmw_resource * ctx)833*4882a593Smuzhiyun struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
834*4882a593Smuzhiyun {
835*4882a593Smuzhiyun struct vmw_user_context *uctx =
836*4882a593Smuzhiyun container_of(ctx, struct vmw_user_context, res);
837*4882a593Smuzhiyun
838*4882a593Smuzhiyun return vmw_binding_state_list(uctx->cbs);
839*4882a593Smuzhiyun }
840*4882a593Smuzhiyun
vmw_context_res_man(struct vmw_resource * ctx)841*4882a593Smuzhiyun struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
842*4882a593Smuzhiyun {
843*4882a593Smuzhiyun return container_of(ctx, struct vmw_user_context, res)->man;
844*4882a593Smuzhiyun }
845*4882a593Smuzhiyun
/*
 * Look up one of the context's cotable resources by type, validating the
 * type against the device's supported cotable range.
 */
struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
					 SVGACOTableType cotable_type)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	u32 cotable_max;

	cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	if (cotable_type >= cotable_max)
		return ERR_PTR(-EINVAL);

	return uctx->cotables[cotable_type];
}
858*4882a593Smuzhiyun
859*4882a593Smuzhiyun /**
860*4882a593Smuzhiyun * vmw_context_binding_state -
861*4882a593Smuzhiyun * Return a pointer to a context binding state structure
862*4882a593Smuzhiyun *
863*4882a593Smuzhiyun * @ctx: The context resource
864*4882a593Smuzhiyun *
865*4882a593Smuzhiyun * Returns the current state of bindings of the given context. Note that
866*4882a593Smuzhiyun * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
867*4882a593Smuzhiyun */
868*4882a593Smuzhiyun struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource * ctx)869*4882a593Smuzhiyun vmw_context_binding_state(struct vmw_resource *ctx)
870*4882a593Smuzhiyun {
871*4882a593Smuzhiyun return container_of(ctx, struct vmw_user_context, res)->cbs;
872*4882a593Smuzhiyun }
873*4882a593Smuzhiyun
874*4882a593Smuzhiyun /**
875*4882a593Smuzhiyun * vmw_context_bind_dx_query -
876*4882a593Smuzhiyun * Sets query MOB for the context. If @mob is NULL, then this function will
877*4882a593Smuzhiyun * remove the association between the MOB and the context. This function
878*4882a593Smuzhiyun * assumes the binding_mutex is held.
879*4882a593Smuzhiyun *
880*4882a593Smuzhiyun * @ctx_res: The context resource
881*4882a593Smuzhiyun * @mob: a reference to the query MOB
882*4882a593Smuzhiyun *
883*4882a593Smuzhiyun * Returns -EINVAL if a MOB has already been set and does not match the one
884*4882a593Smuzhiyun * specified in the parameter. 0 otherwise.
885*4882a593Smuzhiyun */
vmw_context_bind_dx_query(struct vmw_resource * ctx_res,struct vmw_buffer_object * mob)886*4882a593Smuzhiyun int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
887*4882a593Smuzhiyun struct vmw_buffer_object *mob)
888*4882a593Smuzhiyun {
889*4882a593Smuzhiyun struct vmw_user_context *uctx =
890*4882a593Smuzhiyun container_of(ctx_res, struct vmw_user_context, res);
891*4882a593Smuzhiyun
892*4882a593Smuzhiyun if (mob == NULL) {
893*4882a593Smuzhiyun if (uctx->dx_query_mob) {
894*4882a593Smuzhiyun uctx->dx_query_mob->dx_query_ctx = NULL;
895*4882a593Smuzhiyun vmw_bo_unreference(&uctx->dx_query_mob);
896*4882a593Smuzhiyun uctx->dx_query_mob = NULL;
897*4882a593Smuzhiyun }
898*4882a593Smuzhiyun
899*4882a593Smuzhiyun return 0;
900*4882a593Smuzhiyun }
901*4882a593Smuzhiyun
902*4882a593Smuzhiyun /* Can only have one MOB per context for queries */
903*4882a593Smuzhiyun if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
904*4882a593Smuzhiyun return -EINVAL;
905*4882a593Smuzhiyun
906*4882a593Smuzhiyun mob->dx_query_ctx = ctx_res;
907*4882a593Smuzhiyun
908*4882a593Smuzhiyun if (!uctx->dx_query_mob)
909*4882a593Smuzhiyun uctx->dx_query_mob = vmw_bo_reference(mob);
910*4882a593Smuzhiyun
911*4882a593Smuzhiyun return 0;
912*4882a593Smuzhiyun }
913*4882a593Smuzhiyun
914*4882a593Smuzhiyun /**
915*4882a593Smuzhiyun * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
916*4882a593Smuzhiyun *
917*4882a593Smuzhiyun * @ctx_res: The context resource
918*4882a593Smuzhiyun */
919*4882a593Smuzhiyun struct vmw_buffer_object *
vmw_context_get_dx_query_mob(struct vmw_resource * ctx_res)920*4882a593Smuzhiyun vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
921*4882a593Smuzhiyun {
922*4882a593Smuzhiyun struct vmw_user_context *uctx =
923*4882a593Smuzhiyun container_of(ctx_res, struct vmw_user_context, res);
924*4882a593Smuzhiyun
925*4882a593Smuzhiyun return uctx->dx_query_mob;
926*4882a593Smuzhiyun }
927