// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "ttm_object.h"


/**
 * struct vmw_user_buffer_object - User-space-visible buffer object
 *
 * @prime: The prime object providing user visibility.
 * @vbo: The struct vmw_buffer_object
 */
struct vmw_user_buffer_object {
	struct ttm_prime_object prime;
	struct vmw_buffer_object vbo;
};


/**
 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_buffer_object embedding the
 * TTM buffer object.
 */
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_buffer_object, base);
}


/**
 * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_user_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_user_buffer_object embedding the TTM
 * buffer object.
 */
static struct vmw_user_buffer_object *
vmw_user_buffer_object(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
}


/**
 * vmw_bo_pin_in_placement - Validate a buffer to placement.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @placement:  The placement to pin it.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
			    struct vmw_buffer_object *buf,
			    struct ttm_placement *placement,
			    bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->pin_count > 0)
		ret = ttm_bo_mem_compat(placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, placement, &ctx);

	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);

err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}


/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_buffer_object *buf,
			      bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->pin_count > 0) {
		ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
		goto out_unreserve;
	}

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);

out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}


/**
 * vmw_bo_pin_in_vram - Move a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
		       struct vmw_buffer_object *buf,
		       bool interruptible)
{
	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
				       interruptible);
}
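
/*
 * Illustrative usage sketch (not part of the original driver code): a
 * hypothetical caller pins a buffer in VRAM, uses it, and drops the pin
 * again. Error handling is abbreviated; only functions defined in this
 * file are used.
 */
#if 0	/* example only */
static int example_vram_pin_cycle(struct vmw_private *dev_priv,
				  struct vmw_buffer_object *buf)
{
	int ret;

	/* May sleep and may be interrupted by a signal (-ERESTARTSYS). */
	ret = vmw_bo_pin_in_vram(dev_priv, buf, true);
	if (ret)
		return ret;

	/* ... the buffer is now guaranteed to stay resident in VRAM ... */

	/* Drop the pin again; the buffer itself is not moved by this call. */
	return vmw_bo_unpin(dev_priv, buf, true);
}
#endif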


/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to pin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_buffer_object *buf,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	struct ttm_place place;
	int ret = 0;
	uint32_t new_flags;

	place = vmw_vram_placement.placement[0];
	place.lpfn = bo->num_pages;
	placement.num_placement = 1;
	placement.placement = &place;
	placement.num_busy_placement = 1;
	placement.busy_placement = &place;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * Is this buffer already in vram but not at the start of it?
	 * In that case, evict it first because TTM isn't good at handling
	 * that situation.
	 */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->num_pages &&
	    bo->mem.start > 0 &&
	    buf->pin_count == 0) {
		ctx.interruptible = false;
		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
	}

	if (buf->pin_count > 0)
		ret = ttm_bo_mem_compat(&placement, &bo->mem,
					&new_flags) == true ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, &placement, &ctx);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->mem.start != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}


/**
 * vmw_bo_unpin - Unpin the given buffer. Does not move the buffer.
 *
 * This function takes the reservation_sem in read mode.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to unpin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
		 struct vmw_buffer_object *buf,
		 bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	vmw_bo_pin_reserved(buf, false);

	ttm_bo_unreserve(bo);

err:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
			  SVGAGuestPtr *ptr)
{
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
		ptr->offset = bo->mem.start << PAGE_SHIFT;
	} else {
		ptr->gmrId = bo->mem.start;
		ptr->offset = 0;
	}
}


/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 *
 */
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->base;
	uint32_t old_mem_type = bo->mem.mem_type;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	if (pin) {
		if (vbo->pin_count++ > 0)
			return;
	} else {
		WARN_ON(vbo->pin_count <= 0);
		if (--vbo->pin_count > 0)
			return;
	}

	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.mem_type = bo->mem.mem_type;
	pl.flags = bo->mem.placement;
	if (pin)
		pl.flags |= TTM_PL_FLAG_NO_EVICT;
	else
		pl.flags &= ~TTM_PL_FLAG_NO_EVICT;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	ret = ttm_bo_validate(bo, &placement, &ctx);

	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
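
/*
 * Illustrative usage sketch (not part of the original driver code):
 * vmw_bo_pin_reserved() requires the buffer to be reserved by the caller,
 * so a hypothetical user wraps it in a ttm_bo_reserve()/ttm_bo_unreserve()
 * pair.
 */
#if 0	/* example only */
static int example_pin_reserved(struct vmw_buffer_object *vbo)
{
	struct ttm_buffer_object *bo = &vbo->base;
	int ret;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	/* Bump the pin count; the buffer keeps its current placement. */
	vmw_bo_pin_reserved(vbo, true);
	ttm_bo_unreserve(bo);
	return 0;
}
#endif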


/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on either
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 *
 */
void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
{
	struct ttm_buffer_object *bo = &vbo->base;
	bool not_used;
	void *virtual;
	int ret;

	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
	if (virtual)
		return virtual;

	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
	if (ret)
		DRM_ERROR("Buffer object map failed: %d.\n", ret);

	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}


/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */
void vmw_bo_unmap(struct vmw_buffer_object *vbo)
{
	if (vbo->map.bo == NULL)
		return;

	ttm_bo_kunmap(&vbo->map);
}
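
/*
 * Illustrative usage sketch (not part of the original driver code): mapping
 * a pinned or reserved buffer into the kernel address space, writing to it,
 * and tearing the cached map down again. memcpy() and the two helpers above
 * are the only functions used.
 */
#if 0	/* example only */
static int example_cpu_write(struct vmw_buffer_object *vbo,
			     const void *data, size_t size)
{
	void *virtual;

	/* Returns the cached map if one already exists. */
	virtual = vmw_bo_map_and_cache(vbo);
	if (!virtual)
		return -ENOMEM;

	memcpy(virtual, data, size);

	/* Optional: the map is also torn down on move, swapout or destroy. */
	vmw_bo_unmap(vbo);
	return 0;
}
#endif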


/**
 * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
			      bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_buffer_object));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
			TTM_OBJ_EXTRA_SIZE;
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}


/**
 * vmw_bo_bo_free - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

	WARN_ON(vmw_bo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
	vmw_bo_unmap(vmw_bo);
	kfree(vmw_bo);
}


/**
 * vmw_user_bo_destroy - vmw user buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
	struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;

	WARN_ON(vbo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
	vmw_bo_unmap(vbo);
	ttm_prime_object_kfree(vmw_user_bo, prime);
}


/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptible.
 * @bo_free: The buffer object destructor.
 * Returns: Zero on success, negative error code on error.
 *
 * Note that on error, the code will free the buffer object.
 */
int vmw_bo_init(struct vmw_private *dev_priv,
		struct vmw_buffer_object *vmw_bo,
		size_t size, struct ttm_placement *placement,
		bool interruptible,
		void (*bo_free)(struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_bo_destroy);

	WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));

	acc_size = vmw_bo_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));
	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->base.priority = 3;
	vmw_bo->res_tree = RB_ROOT;

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible, acc_size,
			  NULL, NULL, bo_free);
	return ret;
}


/**
 * vmw_user_bo_release - TTM reference base object release callback for
 * vmw user buffer objects
 *
 * @p_base: The TTM base object pointer about to be unreferenced.
 *
 * Clears the TTM base object pointer and drops the reference the
 * base object has on the underlying struct vmw_buffer_object.
 */
static void vmw_user_bo_release(struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base = *p_base;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	ttm_bo_put(&vmw_user_bo->vbo.base);
}


/**
 * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
 * for vmw user buffer objects
 *
 * @base: Pointer to the TTM base object
 * @ref_type: Reference type of the reference reaching zero.
 *
 * Called when user-space drops its last synccpu reference on the buffer
 * object, either explicitly or as part of a cleanup file close.
 */
static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
					enum ttm_ref_type ref_type)
{
	struct vmw_user_buffer_object *user_bo;

	user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		atomic_dec(&user_bo->vbo.cpu_writers);
		break;
	default:
		WARN_ONCE(true, "Undefined buffer object reference release.\n");
	}
}


/**
 * vmw_user_bo_alloc - Allocate a user buffer object
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the buffer object.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
 * should be assigned.
 * @p_base: Pointer to where a refcounted pointer to the TTM base object
 * should be placed, or NULL if no such pointer is required.
 * Return: Zero on success, negative error code on error.
 */
int vmw_user_bo_alloc(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t size,
		      bool shareable,
		      uint32_t *handle,
		      struct vmw_buffer_object **p_vbo,
		      struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *user_bo;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(!user_bo)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
			  (dev_priv->has_mob) ?
			  &vmw_sys_placement :
			  &vmw_vram_sys_placement, true,
			  &vmw_user_bo_destroy);
	if (unlikely(ret != 0))
		return ret;

	ttm_bo_get(&user_bo->vbo.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_bo_release,
				    &vmw_user_bo_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_put(&user_bo->vbo.base);
		goto out_no_base_object;
	}

	*p_vbo = &user_bo->vbo;
	if (p_base) {
		*p_base = &user_bo->prime.base;
		kref_get(&(*p_base)->refcount);
	}
	*handle = user_bo->prime.base.handle;

out_no_base_object:
	return ret;
}


/**
 * vmw_user_bo_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
			      struct ttm_object_file *tfile)
{
	struct vmw_user_buffer_object *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_bo_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_buffer_object(bo);

	/* Check that the caller has opened the object. */
	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
		return 0;

	DRM_ERROR("Could not grant buffer access.\n");
	return -EPERM;
}


/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
				    struct ttm_object_file *tfile,
				    uint32_t flags)
{
	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
	struct ttm_buffer_object *bo = &user_bo->vbo.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		long lret;

		lret = dma_resv_wait_timeout_rcu
			(bo->base.resv, true, true,
			 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_wait(bo, true, nonblock);
	if (likely(ret == 0))
		atomic_inc(&user_bo->vbo.cpu_writers);

	ttm_bo_unreserve(bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed, false);
	if (ret != 0 || existed)
		atomic_dec(&user_bo->vbo.cpu_writers);

	return ret;
}

/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_bo_synccpu_release(uint32_t handle,
				       struct ttm_object_file *tfile,
				       uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}


/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_buffer_object *vbo;
	struct vmw_user_buffer_object *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_base_object *buffer_base;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
					 &buffer_base);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(vbo, struct vmw_user_buffer_object,
				       vbo);
		ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_bo_unreference(&vbo);
		ttm_base_object_unref(&buffer_base);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
						  arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}


/**
 * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
 * allocation functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and allocates a
 * struct vmw_user_buffer_object bo.
 */
int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_buffer_object *vbo;
	uint32_t handle;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				req->size, false, &handle, &vbo,
				NULL);
	if (unlikely(ret != 0))
		goto out_no_bo;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_bo_unreference(&vbo);

out_no_bo:
	ttm_read_unlock(&dev_priv->reservation_sem);

	return ret;
}


/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a TTM base object, optionally freeing the object.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}


/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle
 * @out: Pointer to where a pointer to the embedded
 * struct vmw_buffer_object should be placed.
 * @p_base: Pointer to where a pointer to the TTM base object should be
 * placed, or NULL if no such pointer is required.
 * Return: Zero on success, negative error code on error.
 *
 * Both the output base object pointer and the vmw buffer object pointer
 * will be refcounted.
 */
int vmw_user_bo_lookup(struct ttm_object_file *tfile,
		       uint32_t handle, struct vmw_buffer_object **out,
		       struct ttm_base_object **p_base)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	ttm_bo_get(&vmw_user_bo->vbo.base);
	if (p_base)
		*p_base = base;
	else
		ttm_base_object_unref(&base);
	*out = &vmw_user_bo->vbo;

	return 0;
}
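
/*
 * Illustrative usage sketch (not part of the original driver code): looking
 * up a user buffer object by handle, using it, and dropping the reference
 * again with vmw_bo_unreference() as the ioctl paths above do.
 */
#if 0	/* example only */
static int example_lookup(struct ttm_object_file *tfile, uint32_t handle)
{
	struct vmw_buffer_object *vbo;
	int ret;

	/* Passing a NULL p_base: only the buffer object is refcounted. */
	ret = vmw_user_bo_lookup(tfile, handle, &vbo, NULL);
	if (ret)
		return ret;

	/* ... use vbo ... */

	vmw_bo_unreference(&vbo);
	return 0;
}
#endif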

/**
 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 *
 * This function looks up a struct vmw_user_bo and returns a pointer to the
 * struct vmw_buffer_object it derives from without refcounting the pointer.
 * The returned pointer is only valid until vmw_user_bo_noref_release() is
 * called, and the object pointed to by the returned pointer may be doomed.
 * Any persistent usage of the object requires a refcount to be taken using
 * ttm_bo_reference_unless_doomed(). Iff this function returns successfully it
 * needs to be paired with vmw_user_bo_noref_release(), and no sleeping
 * or scheduling functions may be called in between these function calls.
 *
 * Return: A struct vmw_buffer_object pointer if successful or negative
 * error pointer on failure.
 */
struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct vmw_user_buffer_object *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base) {
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-ESRCH);
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_noref_release();
		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
			  (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
				   prime.base);
	return &vmw_user_bo->vbo;
}

/**
 * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
 *
 * @tfile: The TTM object file to register the handle with.
 * @vbo: The embedded vmw buffer object.
 * @handle: Pointer to where the new handle should be placed.
 * Return: Zero on success, negative error code on error.
 */
int vmw_user_bo_reference(struct ttm_object_file *tfile,
			  struct vmw_buffer_object *vbo,
			  uint32_t *handle)
{
	struct vmw_user_buffer_object *user_bo;

	if (vbo->base.destroy != vmw_user_bo_destroy)
		return -EINVAL;

	user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);

	*handle = user_bo->prime.base.handle;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL, false);
}


/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;

	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL) {
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
		dma_fence_put(&fence->base);
	} else
		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
}
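
/*
 * Illustrative usage sketch (not part of the original driver code):
 * vmw_bo_fence_single() expects the buffer to be reserved and does not
 * unreserve it, so a hypothetical caller brackets it explicitly. Passing a
 * NULL fence makes the function insert one into the command stream itself.
 */
#if 0	/* example only */
static int example_fence_after_commands(struct ttm_buffer_object *bo)
{
	int ret;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	/* ... submit device commands that read or write @bo ... */

	vmw_bo_fence_single(bo, NULL);
	ttm_bo_unreserve(bo);
	return 0;
}
#endif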


/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_bo_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_buffer_object *vbo;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				args->size, false, &args->handle,
				&vbo, NULL);
	if (unlikely(ret != 0))
		goto out_no_bo;

	vmw_bo_unreference(&vbo);
out_no_bo:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}


/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_buffer_object *out_buf;
	int ret;

	ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
	vmw_bo_unreference(&out_buf);
	return 0;
}


/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}


/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
	/* Is @bo embedded in a struct vmw_buffer_object? */
	if (bo->destroy != vmw_bo_bo_free &&
	    bo->destroy != vmw_user_bo_destroy)
		return;

	/* Kill any cached kernel maps before swapout */
	vmw_bo_unmap(vmw_buffer_object(bo));
}


/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_resource indicating to what memory
 *       region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_resource *mem)
{
	struct vmw_buffer_object *vbo;

	if (mem == NULL)
		return;

	/* Make sure @bo is embedded in a struct vmw_buffer_object. */
	if (bo->destroy != vmw_bo_bo_free &&
	    bo->destroy != vmw_user_bo_destroy)
		return;

	vbo = container_of(bo, struct vmw_buffer_object, base);

	/*
	 * Kill any cached kernel maps before move to or from VRAM.
	 * With other types of moves, the underlying pages stay the same,
	 * and the map can be kept.
	 */
	if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
		vmw_bo_unmap(vbo);

	/*
	 * If we're moving a backup MOB out of MOB placement, then make sure we
	 * read back all resource content first, and unbind the MOB from
	 * the resource.
	 */
	if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
		vmw_resource_unbind_list(vbo);
}
1177