/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole", and we always try to allocate
 * after the last allocated bo. The principle is that in a linear GPU ring
 * progression, what comes after the last bo is the oldest bo we allocated
 * and thus the first one that should no longer be in use by the GPU.
 *
 * If that is not the case, we skip over the bo after last to the closest
 * done bo, if such a one exists. If none exists and we are not asked to
 * block, we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fence of each ring and
 * return as soon as any of those fences completes.
 */
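/* Layout sketch (illustrative only): suballocations live on "olist" in
 * offset order inside the single backing bo; "hole" points at the entry
 * after which the free space starts:
 *
 *   olist:  [ bo A ][ bo B ][ bo C ]..............end of buffer
 *                                  ^hole: free space runs from C's
 *                                   eoffset to the next entry's soffset
 *                                   (or to the end of the buffer)
 */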

#include "radeon.h"

static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);

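/**
 * radeon_sa_bo_manager_init - initialize the suballocator manager
 *
 * @rdev: radeon_device pointer
 * @sa_manager: pointer to the sa_manager
 * @size: size of the backing buffer object
 * @align: alignment the suballocator must be able to satisfy
 * @domain: memory domain to place the backing bo in
 * @flags: bo creation flags for the backing bo
 *
 * Initializes the bookkeeping lists and creates the single backing bo
 * from which all suballocations are carved. Returns 0 on success,
 * negative error code on failure.
 */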
int radeon_sa_bo_manager_init(struct radeon_device *rdev,
			      struct radeon_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain, u32 flags)
{
	int i, r;

	init_waitqueue_head(&sa_manager->wq);
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->align = align;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		INIT_LIST_HEAD(&sa_manager->flist[i]);
	}

	r = radeon_bo_create(rdev, size, align, true,
			     domain, flags, NULL, NULL, &sa_manager->bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	return r;
}

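/**
 * radeon_sa_bo_manager_fini - tear down the suballocator manager
 *
 * @rdev: radeon_device pointer
 * @sa_manager: pointer to the sa_manager
 *
 * Frees all remaining suballocations (with a warning if any are still
 * live) and drops the reference on the backing bo.
 */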
void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		radeon_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		radeon_sa_bo_remove_locked(sa_bo);
	}
	radeon_bo_unref(&sa_manager->bo);
	sa_manager->size = 0;
}

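/**
 * radeon_sa_bo_manager_start - make the suballocator ready for use
 *
 * @rdev: radeon_device pointer
 * @sa_manager: pointer to the sa_manager
 *
 * Pins the backing bo into its memory domain and maps it into the
 * kernel address space, so suballocations have valid GPU and CPU
 * addresses. Returns 0 on success, negative error code on failure.
 */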
int radeon_sa_bo_manager_start(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	/* map the buffer */
	r = radeon_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		radeon_bo_unreserve(sa_manager->bo);
		dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	radeon_bo_unreserve(sa_manager->bo);
	return r;
}

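/**
 * radeon_sa_bo_manager_suspend - release the backing bo mappings
 *
 * @rdev: radeon_device pointer
 * @sa_manager: pointer to the sa_manager
 *
 * Unmaps and unpins the backing bo, typically before suspend or
 * teardown. Returns 0 on success, negative error code on failure.
 */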
int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
				 struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	r = radeon_bo_reserve(sa_manager->bo, false);
	if (!r) {
		radeon_bo_kunmap(sa_manager->bo);
		radeon_bo_unpin(sa_manager->bo);
		radeon_bo_unreserve(sa_manager->bo);
	}
	return r;
}

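/**
 * radeon_sa_bo_remove_locked - free a suballocation
 *
 * @sa_bo: suballocation to free
 *
 * Unlinks the suballocation from the allocation and fence lists,
 * drops its fence reference and frees it. The hole pointer is moved
 * back if it pointed at this entry. Caller must hold the manager's
 * wq.lock.
 */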
static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
{
	struct radeon_sa_manager *sa_manager = sa_bo->manager;
	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	radeon_fence_unref(&sa_bo->fence);
	kfree(sa_bo);
}

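/**
 * radeon_sa_bo_try_free - reclaim signaled allocations after the hole
 *
 * @sa_manager: pointer to the sa_manager
 *
 * Walks the allocations following the hole and frees every one whose
 * fence has already signaled, stopping at the first one still in use.
 * Caller must hold the manager's wq.lock.
 */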
static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
			return;
		}
		radeon_sa_bo_remove_locked(sa_bo);
	}
}

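/**
 * radeon_sa_bo_hole_soffset - start offset of the current hole
 *
 * @sa_manager: pointer to the sa_manager
 *
 * The hole starts where the allocation it points at ends, or at
 * offset 0 when the hole points at the list head.
 */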
static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
	}
	return 0;
}

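/**
 * radeon_sa_bo_hole_eoffset - end offset of the current hole
 *
 * @sa_manager: pointer to the sa_manager
 *
 * The hole ends where the next allocation starts, or at the end of
 * the buffer when no allocation follows.
 */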
static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}

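/**
 * radeon_sa_bo_try_alloc - try to suballocate from the current hole
 *
 * @sa_manager: pointer to the sa_manager
 * @sa_bo: suballocation to fill in on success
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * If the hole is large enough for size bytes at the requested
 * alignment, links the new suballocation in after the hole and moves
 * the hole pointer behind it. Returns true on success, false if the
 * hole is too small. Caller must hold the manager's wq.lock.
 */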
static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
				   struct radeon_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}

/**
 * radeon_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly
 */
static bool radeon_sa_event(struct radeon_sa_manager *sa_manager,
			    unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!list_empty(&sa_manager->flist[i])) {
			return true;
		}
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		return true;
	}

	return false;
}

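/**
 * radeon_sa_bo_next_hole - advance the hole past finished allocations
 *
 * @sa_manager: pointer to the sa_manager
 * @fences: per-ring array filled with the fences we would have to
 *	wait on before more space can be reclaimed
 * @tries: per-ring count of how often we already skipped ahead
 *
 * If the hole is at the end of the buffer, wrap around to the
 * beginning. Otherwise look at the oldest allocation of each ring,
 * free the signaled one closest after the hole and move the hole
 * there. Returns true if the caller should retry the allocation,
 * false if there is nothing more to reclaim without waiting.
 * Caller must hold the manager's wq.lock.
 */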
static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
				   struct radeon_fence **fences,
				   unsigned *tries)
{
	struct radeon_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again from the beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over the fence list of each ring and try to find the
	 * sa_bo closest to the current hole
	 */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_sa_bo *sa_bo;

		if (list_empty(&sa_manager->flist[i])) {
			continue;
		}

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct radeon_sa_bo, flist);

		if (!radeon_fence_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		++tries[best_bo->fence->ring];
		sa_manager->hole = best_bo->olist.prev;

		/* we know that this one is signaled,
		   so it's safe to remove it */
		radeon_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}

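/**
 * radeon_sa_bo_new - allocate a new suballocation
 *
 * @rdev: radeon_device pointer
 * @sa_manager: pointer to the sa_manager
 * @sa_bo: filled with the new suballocation on success
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match (must not exceed the manager's)
 *
 * Tries to allocate from the current hole, reclaiming finished
 * allocations along the way; if nothing can be reclaimed, waits on
 * the oldest fences and retries. Returns 0 on success, negative
 * error code on failure (e.g. an interrupted wait).
 *
 * Minimal usage sketch (the address helpers are assumed to be the
 * inline accessors declared in radeon.h; fence comes from whatever
 * command submission used the memory):
 *
 *	struct radeon_sa_bo *sa_bo;
 *	int r = radeon_sa_bo_new(rdev, sa_manager, &sa_bo, 4096, 256);
 *	if (!r) {
 *		uint64_t gpu = radeon_sa_bo_gpu_addr(sa_bo);
 *		void *cpu = radeon_sa_bo_cpu_addr(sa_bo);
 *		... emit commands using gpu/cpu, then ...
 *		radeon_sa_bo_free(rdev, &sa_bo, fence);
 *	}
 */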
int radeon_sa_bo_new(struct radeon_device *rdev,
		     struct radeon_sa_manager *sa_manager,
		     struct radeon_sa_bo **sa_bo,
		     unsigned size, unsigned align)
{
	struct radeon_fence *fences[RADEON_NUM_RINGS];
	unsigned tries[RADEON_NUM_RINGS];
	int i, r;

	BUG_ON(align > sa_manager->align);
	BUG_ON(size > sa_manager->size);

	*sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
	if ((*sa_bo) == NULL) {
		return -ENOMEM;
	}
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

	spin_lock(&sa_manager->wq.lock);
	do {
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			fences[i] = NULL;
			tries[i] = 0;
		}

		do {
			radeon_sa_bo_try_free(sa_manager);

			if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
				spin_unlock(&sa_manager->wq.lock);
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));

		for (i = 0; i < RADEON_NUM_RINGS; ++i)
			radeon_fence_ref(fences[i]);

		spin_unlock(&sa_manager->wq.lock);
		r = radeon_fence_wait_any(rdev, fences, false);
		for (i = 0; i < RADEON_NUM_RINGS; ++i)
			radeon_fence_unref(&fences[i]);
		spin_lock(&sa_manager->wq.lock);
		/* if we have nothing to wait for, block */
		if (r == -ENOENT) {
			r = wait_event_interruptible_locked(
				sa_manager->wq,
				radeon_sa_event(sa_manager, size, align)
			);
		}

	} while (!r);

	spin_unlock(&sa_manager->wq.lock);
	kfree(*sa_bo);
	*sa_bo = NULL;
	return r;
}

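/**
 * radeon_sa_bo_free - free a suballocation
 *
 * @rdev: radeon_device pointer
 * @sa_bo: suballocation to free, set to NULL on return
 * @fence: fence that must signal before the memory may be reused,
 *	or NULL if the allocation is already idle
 *
 * If a fence is given and not yet signaled, the suballocation is
 * queued on the fence list of the fence's ring and reclaimed later;
 * otherwise it is freed immediately. Waiters are woken up in either
 * case.
 */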
void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
		       struct radeon_fence *fence)
{
	struct radeon_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
	spin_lock(&sa_manager->wq.lock);
	if (fence && !radeon_fence_signaled(fence)) {
		(*sa_bo)->fence = radeon_fence_ref(fence);
		list_add_tail(&(*sa_bo)->flist,
			      &sa_manager->flist[fence->ring]);
	} else {
		radeon_sa_bo_remove_locked(*sa_bo);
	}
	wake_up_all_locked(&sa_manager->wq);
	spin_unlock(&sa_manager->wq.lock);
	*sa_bo = NULL;
}

#if defined(CONFIG_DEBUG_FS)
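/**
 * radeon_sa_bo_dump_debug_info - print the allocator state via debugfs
 *
 * @sa_manager: pointer to the sa_manager
 * @m: seq_file to print into
 *
 * Lists every suballocation with its GPU address range, size and
 * protecting fence; the entry the hole points at is marked with '>'.
 */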
void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct radeon_sa_bo *i;

	spin_lock(&sa_manager->wq.lock);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		uint64_t soffset = i->soffset + sa_manager->gpu_addr;
		uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
			   soffset, eoffset, eoffset - soffset);
		if (i->fence) {
			seq_printf(m, " protected by 0x%016llx on ring %d",
				   i->fence->seq, i->fence->ring);
		}
		seq_printf(m, "\n");
	}
	spin_unlock(&sa_manager->wq.lock);
}
#endif