/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/delay.h>

#include <trace/events/dma_fence.h>

#include "qxl_drv.h"
#include "qxl_object.h"

/*
 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
 * into 256 byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */
/* manage releasables */
/* stack them 16 high for now - drawable object is 191 */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)

static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };

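/*
 * A quick sketch of the suballocation arithmetic implied by the defines
 * above (each release BO is a single 4096-byte page, see
 * qxl_release_bo_alloc() below):
 *
 *	RELEASES_PER_BO         = 4096 / 256 = 16 drawable slots per page
 *	SURFACE_RELEASES_PER_BO = 4096 / 128 = 32 surface-cmd slots per page
 *
 * The three array entries are indexed by the cur_idx computed in
 * qxl_alloc_release_reserved(): 0 = drawable, 1 = surface cmd,
 * 2 = cursor cmd (which reuses the drawable sizing).
 */
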
static const char *qxl_get_driver_name(struct dma_fence *fence)
{
	return "qxl";
}

static const char *qxl_get_timeline_name(struct dma_fence *fence)
{
	return "release";
}

static long qxl_fence_wait(struct dma_fence *fence, bool intr,
			   signed long timeout)
{
	struct qxl_device *qdev;
	struct qxl_release *release;
	int count = 0, sc = 0;
	bool have_drawable_releases;
	unsigned long cur, end = jiffies + timeout;

	qdev = container_of(fence->lock, struct qxl_device, release_lock);
	release = container_of(fence, struct qxl_release, base);
	have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;

retry:
	sc++;

	if (dma_fence_is_signaled(fence))
		goto signaled;

	qxl_io_notify_oom(qdev);

	for (count = 0; count < 11; count++) {
		if (!qxl_queue_garbage_collect(qdev, true))
			break;

		if (dma_fence_is_signaled(fence))
			goto signaled;
	}

	if (dma_fence_is_signaled(fence))
		goto signaled;

	if (have_drawable_releases || sc < 4) {
		if (sc > 2)
			/* back off */
			usleep_range(500, 1000);

		if (time_after(jiffies, end))
			return 0;

		if (have_drawable_releases && sc > 300) {
			DMA_FENCE_WARN(fence, "failed to wait on release %llu "
				       "after spincount %d\n",
				       fence->context & ~0xf0000000, sc);
			goto signaled;
		}
		goto retry;
	}
	/*
	 * yeah, original sync_obj_wait gave up after 3 spins when
	 * have_drawable_releases is not set.
	 */

signaled:
	cur = jiffies;
	if (time_after(cur, end))
		return 0;
	return end - cur;
}

static const struct dma_fence_ops qxl_fence_ops = {
	.get_driver_name = qxl_get_driver_name,
	.get_timeline_name = qxl_get_timeline_name,
	.wait = qxl_fence_wait,
};
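
/*
 * Note on the custom .wait above: qxl fences are signaled from
 * qxl_release_free() (below), which runs when the host hands release
 * ids back through the release ring on the garbage-collect path, rather
 * than from a per-fence interrupt.  The wait loop therefore nudges the
 * device with qxl_io_notify_oom()/qxl_queue_garbage_collect() and polls
 * dma_fence_is_signaled() with a small usleep_range() backoff.
 */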

static int
qxl_release_alloc(struct qxl_device *qdev, int type,
		  struct qxl_release **ret)
{
	struct qxl_release *release;
	int handle;
	size_t size = sizeof(*release);

	release = kmalloc(size, GFP_KERNEL);
	if (!release) {
		DRM_ERROR("Out of memory\n");
		return -ENOMEM;
	}
	release->base.ops = NULL;
	release->type = type;
	release->release_offset = 0;
	release->surface_release_id = 0;
	INIT_LIST_HEAD(&release->bos);

	idr_preload(GFP_KERNEL);
	spin_lock(&qdev->release_idr_lock);
	handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
	release->base.seqno = ++qdev->release_seqno;
	spin_unlock(&qdev->release_idr_lock);
	idr_preload_end();
	if (handle < 0) {
		kfree(release);
		*ret = NULL;
		return handle;
	}
	*ret = release;
	DRM_DEBUG_DRIVER("allocated release %d\n", handle);
	release->id = handle;
	return handle;
}
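
/*
 * The idr_preload()/idr_alloc(GFP_NOWAIT) pairing above is the standard
 * kernel idiom for allocating an IDR entry under a spinlock: the
 * preload fills a per-cpu cache while sleeping is still allowed, so the
 * allocation inside the critical section never has to sleep.
 */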

static void
qxl_release_free_list(struct qxl_release *release)
{
	while (!list_empty(&release->bos)) {
		struct qxl_bo_list *entry;
		struct qxl_bo *bo;

		entry = container_of(release->bos.next,
				     struct qxl_bo_list, tv.head);
		bo = to_qxl_bo(entry->tv.bo);
		qxl_bo_unref(&bo);
		list_del(&entry->tv.head);
		kfree(entry);
	}
	release->release_bo = NULL;
}

void
qxl_release_free(struct qxl_device *qdev,
		 struct qxl_release *release)
{
	DRM_DEBUG_DRIVER("release %d, type %d\n", release->id, release->type);

	if (release->surface_release_id)
		qxl_surface_id_dealloc(qdev, release->surface_release_id);

	spin_lock(&qdev->release_idr_lock);
	idr_remove(&qdev->release_idr, release->id);
	spin_unlock(&qdev->release_idr_lock);

	if (release->base.ops) {
		WARN_ON(list_empty(&release->bos));
		qxl_release_free_list(release);

		dma_fence_signal(&release->base);
		dma_fence_put(&release->base);
	} else {
		qxl_release_free_list(release);
		kfree(release);
	}
}
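
/*
 * Two teardown paths above: once qxl_release_fence_buffer_objects() has
 * run, release->base.ops points at qxl_fence_ops, so the release is
 * signaled and then freed through the fence refcount (dma_fence_put()
 * ends in dma_fence_free(), since qxl_fence_ops has no .release hook,
 * and base is the first member of struct qxl_release).  A release whose
 * fence was never initialized is simply kfree()d here.
 */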

static int qxl_release_bo_alloc(struct qxl_device *qdev,
				struct qxl_bo **bo,
				u32 priority)
{
	/* pin release BOs; they are too messy to evict */
	return qxl_bo_create(qdev, PAGE_SIZE, false, true,
			     QXL_GEM_DOMAIN_VRAM, priority, NULL, bo);
}

int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
	struct qxl_bo_list *entry;

	list_for_each_entry(entry, &release->bos, tv.head) {
		if (entry->tv.bo == &bo->tbo)
			return 0;
	}

	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	qxl_bo_ref(bo);
	entry->tv.bo = &bo->tbo;
	entry->tv.num_shared = 0;
	list_add_tail(&entry->tv.head, &release->bos);
	return 0;
}

static int qxl_release_validate_bo(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { true, false };
	int ret;

	if (!bo->pin_count) {
		qxl_ttm_placement_from_domain(bo, bo->type, false);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (ret)
			return ret;
	}

	ret = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
	if (ret)
		return ret;

	/* allocate a surface for reserved + validated buffers */
	ret = qxl_bo_check_id(to_qxl(bo->tbo.base.dev), bo);
	if (ret)
		return ret;
	return 0;
}

int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
	int ret;
	struct qxl_bo_list *entry;

	/* if only one object on the release it's the release itself
	   since these objects are pinned no need to reserve */
	if (list_is_singular(&release->bos))
		return 0;

	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
				     !no_intr, NULL);
	if (ret)
		return ret;

	list_for_each_entry(entry, &release->bos, tv.head) {
		struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

		ret = qxl_release_validate_bo(bo);
		if (ret) {
			ttm_eu_backoff_reservation(&release->ticket, &release->bos);
			return ret;
		}
	}
	return 0;
}
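
/*
 * Typical caller flow for the functions above and below (a sketch of
 * the command-submission paths elsewhere in the driver; error handling
 * omitted, and some_extra_bo is a hypothetical additional buffer):
 *
 *	qxl_alloc_release_reserved(qdev, size, type, &release, &cmd_bo);
 *	qxl_release_list_add(release, some_extra_bo);	// optional
 *	qxl_release_reserve_list(release, true);
 *	info = qxl_release_map(qdev, release);
 *	// ... fill in the command ...
 *	qxl_release_unmap(qdev, release, info);
 *	qxl_push_command_ring(qdev, release, QXL_CMD_DRAW);
 *	qxl_release_fence_buffer_objects(release);
 *
 * On failure after reserving, qxl_release_backoff_reserve_list() plus
 * qxl_release_free() undo the reservation instead.
 */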

void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
	/* if only one object on the release it's the release itself
	   since these objects are pinned no need to reserve */
	if (list_is_singular(&release->bos))
		return;

	ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}

int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
				       enum qxl_surface_cmd_type surface_cmd_type,
				       struct qxl_release *create_rel,
				       struct qxl_release **release)
{
	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
		int idr_ret;
		struct qxl_bo *bo;
		union qxl_release_info *info;

		/* stash the release after the create command */
		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
		if (idr_ret < 0)
			return idr_ret;
		bo = create_rel->release_bo;

		(*release)->release_bo = bo;
		(*release)->release_offset = create_rel->release_offset + 64;

		qxl_release_list_add(*release, bo);

		info = qxl_release_map(qdev, *release);
		info->id = idr_ret;
		qxl_release_unmap(qdev, *release, info);
		return 0;
	}

	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
					  QXL_RELEASE_SURFACE_CMD, release, NULL);
}

int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
			       int type, struct qxl_release **release,
			       struct qxl_bo **rbo)
{
	struct qxl_bo *bo;
	int idr_ret;
	int ret = 0;
	union qxl_release_info *info;
	int cur_idx;
	u32 priority;

	if (type == QXL_RELEASE_DRAWABLE) {
		cur_idx = 0;
		priority = 0;
	} else if (type == QXL_RELEASE_SURFACE_CMD) {
		cur_idx = 1;
		priority = 1;
	} else if (type == QXL_RELEASE_CURSOR_CMD) {
		cur_idx = 2;
		priority = 1;
	} else {
		DRM_ERROR("got illegal type: %d\n", type);
		return -EINVAL;
	}

	idr_ret = qxl_release_alloc(qdev, type, release);
	if (idr_ret < 0) {
		if (rbo)
			*rbo = NULL;
		return idr_ret;
	}

	mutex_lock(&qdev->release_mutex);
	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
		qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
		qdev->current_release_bo_offset[cur_idx] = 0;
		qdev->current_release_bo[cur_idx] = NULL;
	}
	if (!qdev->current_release_bo[cur_idx]) {
		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority);
		if (ret) {
			mutex_unlock(&qdev->release_mutex);
			qxl_release_free(qdev, *release);
			return ret;
		}
	}

	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

	(*release)->release_bo = bo;
	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
	qdev->current_release_bo_offset[cur_idx]++;

	if (rbo)
		*rbo = bo;

	mutex_unlock(&qdev->release_mutex);

	ret = qxl_release_list_add(*release, bo);
	qxl_bo_unref(&bo);
	if (ret) {
		qxl_release_free(qdev, *release);
		return ret;
	}

	info = qxl_release_map(qdev, *release);
	info->id = idr_ret;
	qxl_release_unmap(qdev, *release, info);

	return ret;
}
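
/*
 * Worked example of the slot arithmetic above: drawable releases use
 * cur_idx 0, so the slot at counter value N lands at byte offset
 * N * RELEASE_SIZE in the current BO, e.g. counter 4 -> 4 * 256 = 1024.
 * As written, the `+ 1 >=` test fires when the counter reaches
 * releases_per_bo[cur_idx] - 1, so the last slot of each page goes
 * unused before a fresh page-sized BO is allocated and the counter
 * restarts at 0.
 */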

struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
					       uint64_t id)
{
	struct qxl_release *release;

	spin_lock(&qdev->release_idr_lock);
	release = idr_find(&qdev->release_idr, id);
	spin_unlock(&qdev->release_idr_lock);
	if (!release) {
		DRM_ERROR("failed to find id in release_idr\n");
		return NULL;
	}

	return release;
}

union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
					struct qxl_release *release)
{
	void *ptr;
	union qxl_release_info *info;
	struct qxl_bo *bo = release->release_bo;

	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
	if (!ptr)
		return NULL;
	info = ptr + (release->release_offset & ~PAGE_MASK);
	return info;
}

void qxl_release_unmap(struct qxl_device *qdev,
		       struct qxl_release *release,
		       union qxl_release_info *info)
{
	struct qxl_bo *bo = release->release_bo;
	void *ptr;

	ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}
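
/*
 * Mapping arithmetic for qxl_release_map()/qxl_release_unmap(): release
 * BOs are one page, so e.g. release_offset 0x180 splits into a page
 * base of 0x180 & PAGE_MASK = 0 (handed to qxl_bo_kmap_atomic_page())
 * and an in-page offset of 0x180 & ~PAGE_MASK = 0x180 added to the
 * returned mapping; unmap subtracts the same in-page offset to recover
 * the mapping pointer.
 */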

void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_device *bdev;
	struct ttm_validate_buffer *entry;
	struct qxl_device *qdev;

	/* if only one object on the release it's the release itself
	   since these objects are pinned no need to reserve */
	if (list_is_singular(&release->bos) || list_empty(&release->bos))
		return;

	bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	qdev = container_of(bdev, struct qxl_device, mman.bdev);

	/*
	 * Since we never really allocated a context and we don't want to conflict,
	 * set the highest bits. This will break if we really allow exporting of dma-bufs.
	 */
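	/*
	 * (These high context bits are masked back out in qxl_fence_wait()
	 * via `fence->context & ~0xf0000000` when it prints the release id
	 * in its "failed to wait" warning.)
	 */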
	dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
		       release->id | 0xf0000000, release->base.seqno);
	trace_dma_fence_emit(&release->base);

	spin_lock(&ttm_bo_glob.lru_lock);

	list_for_each_entry(entry, &release->bos, head) {
		bo = entry->bo;

		dma_resv_add_shared_fence(bo->base.resv, &release->base);
		ttm_bo_move_to_lru_tail(bo, NULL);
		dma_resv_unlock(bo->base.resv);
	}
	spin_unlock(&ttm_bo_glob.lru_lock);
	ww_acquire_fini(&release->ticket);
}