/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include "qxl_drv.h"
#include "qxl_object.h"

#include <linux/io-mapping.h>
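
/*
 * TTM destroy callback: evict any backing surface, unlink the BO from
 * the device's GEM object list, then release and free the object.
 */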
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct qxl_bo *bo;
	struct qxl_device *qdev;

	bo = to_qxl_bo(tbo);
	qdev = to_qxl(bo->tbo.base.dev);

	qxl_surface_evict(qdev, bo, false);
	WARN_ON_ONCE(bo->map_count > 0);
	mutex_lock(&qdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&qdev->gem.mutex);
	drm_gem_object_release(&bo->tbo.base);
	kfree(bo);
}

bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &qxl_ttm_bo_destroy)
		return true;
	return false;
}

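/*
 * Translate a QXL_GEM_DOMAIN_* value into a TTM placement list.  Pinned
 * BOs get TTM_PL_FLAG_NO_EVICT, BOs no larger than a page are placed
 * top-down, and surface BOs may fall back from TTM_PL_PRIV to VRAM.
 */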
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
	u32 c = 0;
	u32 pflag = 0;
	unsigned int i;

	if (pinned)
		pflag |= TTM_PL_FLAG_NO_EVICT;
	if (qbo->tbo.base.size <= PAGE_SIZE)
		pflag |= TTM_PL_FLAG_TOPDOWN;

	qbo->placement.placement = qbo->placements;
	qbo->placement.busy_placement = qbo->placements;
	if (domain == QXL_GEM_DOMAIN_VRAM) {
		qbo->placements[c].mem_type = TTM_PL_VRAM;
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
	}
	if (domain == QXL_GEM_DOMAIN_SURFACE) {
		qbo->placements[c].mem_type = TTM_PL_PRIV;
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
		qbo->placements[c].mem_type = TTM_PL_VRAM;
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
	}
	if (domain == QXL_GEM_DOMAIN_CPU) {
		qbo->placements[c].mem_type = TTM_PL_SYSTEM;
		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | pflag;
	}
	if (!c) {
		qbo->placements[c].mem_type = TTM_PL_SYSTEM;
		qbo->placements[c++].flags = TTM_PL_MASK_CACHING;
	}
	qbo->placement.num_placement = c;
	qbo->placement.num_busy_placement = c;
	for (i = 0; i < c; ++i) {
		qbo->placements[i].fpfn = 0;
		qbo->placements[i].lpfn = 0;
	}
}

static const struct drm_gem_object_funcs qxl_object_funcs = {
	.free = qxl_gem_object_free,
	.open = qxl_gem_object_open,
	.close = qxl_gem_object_close,
	.pin = qxl_gem_prime_pin,
	.unpin = qxl_gem_prime_unpin,
	.get_sg_table = qxl_gem_prime_get_sg_table,
	.vmap = qxl_gem_prime_vmap,
	.vunmap = qxl_gem_prime_vunmap,
	.mmap = drm_gem_ttm_mmap,
	.print_info = drm_gem_ttm_print_info,
};

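/*
 * Allocate a qxl_bo of @size bytes (rounded up to a whole page) in the
 * given domain, optionally pinned, and initialise both its GEM and TTM
 * sides.  On success the new object is returned through @bo_ptr.
 */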
int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
		  bool kernel, bool pinned, u32 domain, u32 priority,
		  struct qxl_surface *surf,
		  struct qxl_bo **bo_ptr)
{
	struct qxl_bo *bo;
	enum ttm_bo_type type;
	int r;

	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;
	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->tbo.base.funcs = &qxl_object_funcs;
	bo->type = domain;
	bo->pin_count = pinned ? 1 : 0;
	bo->surface_id = 0;
	INIT_LIST_HEAD(&bo->list);

	if (surf)
		bo->surf = *surf;

	qxl_ttm_placement_from_domain(bo, domain, pinned);

	bo->tbo.priority = priority;
	r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, 0, !kernel, size,
			NULL, NULL, &qxl_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(qdev->ddev.dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		return r;
	}
	*bo_ptr = bo;
	return 0;
}

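/*
 * Map the whole BO into the kernel address space.  The mapping is cached
 * in bo->kptr and reference counted via bo->map_count, so repeated calls
 * are cheap and must be balanced by qxl_bo_kunmap().
 */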
int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;
		bo->map_count++;
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;
	bo->map_count = 1;
	return 0;
}

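/*
 * Map a single page of the BO for a short, atomic access.  For BOs in
 * VRAM or the surface region this uses the device's io_mapping; anything
 * else falls back to the (possibly newly created) full kernel mapping.
 */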
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
			      struct qxl_bo *bo, int page_offset)
{
	unsigned long offset;
	void *rptr;
	int ret;
	struct io_mapping *map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
		map = qdev->surface_mapping;
	else
		goto fallback;

	offset = bo->tbo.mem.start << PAGE_SHIFT;
	return io_mapping_map_atomic_wc(map, offset + page_offset);
fallback:
	if (bo->kptr) {
		rptr = bo->kptr + (page_offset * PAGE_SIZE);
		return rptr;
	}

	ret = qxl_bo_kmap(bo, &rptr);
	if (ret)
		return NULL;

	rptr += page_offset * PAGE_SIZE;
	return rptr;
}

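/*
 * Drop one kmap reference; the kernel mapping is torn down once
 * map_count reaches zero.
 */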
void qxl_bo_kunmap(struct qxl_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->map_count--;
	if (bo->map_count > 0)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
			       struct qxl_bo *bo, void *pmap)
{
	if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
	    (bo->tbo.mem.mem_type != TTM_PL_PRIV))
		goto fallback;

	io_mapping_unmap_atomic(pmap);
	return;
 fallback:
	qxl_bo_kunmap(bo);
}

void qxl_bo_unref(struct qxl_bo **bo)
{
	if ((*bo) == NULL)
		return;

	drm_gem_object_put(&(*bo)->tbo.base);
	*bo = NULL;
}

struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
	drm_gem_object_get(&bo->tbo.base);
	return bo;
}

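/*
 * Internal pin helper, called with the BO reservation held: either bump
 * the existing pin count or validate the BO into its domain with
 * TTM_PL_FLAG_NO_EVICT set.
 */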
static int __qxl_bo_pin(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct drm_device *ddev = bo->tbo.base.dev;
	int r;

	if (bo->pin_count) {
		bo->pin_count++;
		return 0;
	}
	qxl_ttm_placement_from_domain(bo, bo->type, true);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0))
		bo->pin_count = 1;
	if (unlikely(r != 0))
		dev_err(ddev->dev, "%p pin failed\n", bo);
	return r;
}

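/*
 * Internal unpin helper, called with the BO reservation held: drop one
 * pin reference and, once the count hits zero, clear NO_EVICT from the
 * placements and re-validate so the BO becomes evictable again.
 */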
static int __qxl_bo_unpin(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct drm_device *ddev = bo->tbo.base.dev;
	int r, i;

	if (!bo->pin_count) {
		dev_warn(ddev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r != 0))
		dev_err(ddev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

/*
 * Reserve the BO before pinning the object.  If the BO is already
 * reserved, call __qxl_bo_pin() directly instead.
 */
int qxl_bo_pin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	r = __qxl_bo_pin(bo);
	qxl_bo_unreserve(bo);
	return r;
}

/*
 * Reserve the BO before unpinning the object.  If the BO is already
 * reserved, call __qxl_bo_unpin() directly instead.
 */
int qxl_bo_unpin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	r = __qxl_bo_unpin(bo);
	qxl_bo_unreserve(bo);
	return r;
}

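/*
 * Called at driver teardown: if userspace leaked GEM objects, complain,
 * unlink each one from the device list and drop the reference it held.
 */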
void qxl_bo_force_delete(struct qxl_device *qdev)
{
	struct qxl_bo *bo, *n;

	if (list_empty(&qdev->gem.objects))
		return;
	dev_err(qdev->ddev.dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
		dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
			*((unsigned long *)&bo->tbo.base.refcount));
		mutex_lock(&qdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&qdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_put(&bo->tbo.base);
	}
}

int qxl_bo_init(struct qxl_device *qdev)
{
	return qxl_ttm_init(qdev);
}

void qxl_bo_fini(struct qxl_device *qdev)
{
	qxl_ttm_fini(qdev);
}

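/*
 * Make sure a surface BO has a surface id: allocate one lazily and
 * create the corresponding hardware surface on first use.
 */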
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
	int ret;

	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
		/* allocate a surface id for this surface now */
		ret = qxl_surface_id_alloc(qdev, bo);
		if (ret)
			return ret;

		ret = qxl_hw_surface_alloc(qdev, bo);
		if (ret)
			return ret;
	}
	return 0;
}

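/*
 * The two helpers below evict every BO from the surface (TTM_PL_PRIV)
 * and VRAM regions, respectively.
 */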
int qxl_surf_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
}

int qxl_vram_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
}