xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/i915/i915_buddy.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: MIT
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright © 2019 Intel Corporation
4*4882a593Smuzhiyun  */
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun #include <linux/kmemleak.h>
7*4882a593Smuzhiyun #include <linux/slab.h>
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #include "i915_buddy.h"
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #include "i915_gem.h"
12*4882a593Smuzhiyun #include "i915_globals.h"
13*4882a593Smuzhiyun #include "i915_utils.h"
14*4882a593Smuzhiyun 
/*
 * Slab cache for struct i915_buddy_block, shared by every buddy
 * allocator instance and hooked into the i915 globals machinery
 * (shrink/exit callbacks below).  This is a tentative definition;
 * the initialized definition follows once the callbacks exist.
 */
static struct i915_global_block {
	struct i915_global base;
	struct kmem_cache *slab_blocks;
} global;
19*4882a593Smuzhiyun 
/* i915 globals shrink hook: release unused slab pages back to the system. */
static void i915_global_buddy_shrink(void)
{
	kmem_cache_shrink(global.slab_blocks);
}
24*4882a593Smuzhiyun 
/* i915 globals exit hook: destroy the block slab cache on module unload. */
static void i915_global_buddy_exit(void)
{
	kmem_cache_destroy(global.slab_blocks);
}
29*4882a593Smuzhiyun 
/* Initialized definition completing the tentative one above. */
static struct i915_global_block global = { {
	.shrink = i915_global_buddy_shrink,
	.exit = i915_global_buddy_exit,
} };
34*4882a593Smuzhiyun 
/*
 * Create the i915_buddy_block slab cache and register it with the i915
 * globals list so the shrink/exit hooks above take effect.
 *
 * Returns 0 on success, -ENOMEM if the cache cannot be created.
 */
int __init i915_global_buddy_init(void)
{
	global.slab_blocks = KMEM_CACHE(i915_buddy_block, SLAB_HWCACHE_ALIGN);
	if (!global.slab_blocks)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}
44*4882a593Smuzhiyun 
/*
 * Allocate a zeroed block descriptor from the slab cache.
 *
 * The chunk-granular offset and the order are packed together into
 * block->header (see the I915_BUDDY_HEADER_* masks in i915_buddy.h).
 * Returns NULL if the slab allocation fails.
 */
static struct i915_buddy_block *i915_block_alloc(struct i915_buddy_block *parent,
						 unsigned int order,
						 u64 offset)
{
	struct i915_buddy_block *block;

	block = kmem_cache_zalloc(global.slab_blocks, GFP_KERNEL);
	if (!block)
		return NULL;

	block->header = offset | order;
	block->parent = parent;

	return block;
}
61*4882a593Smuzhiyun 
/* Return a block descriptor to the slab cache. */
static void i915_block_free(struct i915_buddy_block *block)
{
	kmem_cache_free(global.slab_blocks, block);
}
66*4882a593Smuzhiyun 
mark_allocated(struct i915_buddy_block * block)67*4882a593Smuzhiyun static void mark_allocated(struct i915_buddy_block *block)
68*4882a593Smuzhiyun {
69*4882a593Smuzhiyun 	block->header &= ~I915_BUDDY_HEADER_STATE;
70*4882a593Smuzhiyun 	block->header |= I915_BUDDY_ALLOCATED;
71*4882a593Smuzhiyun 
72*4882a593Smuzhiyun 	list_del(&block->link);
73*4882a593Smuzhiyun }
74*4882a593Smuzhiyun 
mark_free(struct i915_buddy_mm * mm,struct i915_buddy_block * block)75*4882a593Smuzhiyun static void mark_free(struct i915_buddy_mm *mm,
76*4882a593Smuzhiyun 		      struct i915_buddy_block *block)
77*4882a593Smuzhiyun {
78*4882a593Smuzhiyun 	block->header &= ~I915_BUDDY_HEADER_STATE;
79*4882a593Smuzhiyun 	block->header |= I915_BUDDY_FREE;
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun 	list_add(&block->link,
82*4882a593Smuzhiyun 		 &mm->free_list[i915_buddy_block_order(block)]);
83*4882a593Smuzhiyun }
84*4882a593Smuzhiyun 
mark_split(struct i915_buddy_block * block)85*4882a593Smuzhiyun static void mark_split(struct i915_buddy_block *block)
86*4882a593Smuzhiyun {
87*4882a593Smuzhiyun 	block->header &= ~I915_BUDDY_HEADER_STATE;
88*4882a593Smuzhiyun 	block->header |= I915_BUDDY_SPLIT;
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun 	list_del(&block->link);
91*4882a593Smuzhiyun }
92*4882a593Smuzhiyun 
/*
 * Initialize a buddy allocator managing [0, size) in chunk_size units.
 *
 * size is rounded down to a multiple of chunk_size and then carved into
 * one power-of-two root block per set bit (hweight64), so sizes that
 * are not themselves a power-of-two are supported.
 *
 * Returns 0 on success, -EINVAL for inconsistent size/chunk_size, or
 * -ENOMEM on allocation failure (all partial allocations are unwound).
 */
int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size)
{
	unsigned int i;
	u64 offset;

	if (size < chunk_size)
		return -EINVAL;

	/* Minimum granularity is one page. */
	if (chunk_size < PAGE_SIZE)
		return -EINVAL;

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	size = round_down(size, chunk_size);

	mm->size = size;
	mm->chunk_size = chunk_size;
	mm->max_order = ilog2(size) - ilog2(chunk_size);

	GEM_BUG_ON(mm->max_order > I915_BUDDY_MAX_ORDER);

	/* One free list per order, 0..max_order inclusive. */
	mm->free_list = kmalloc_array(mm->max_order + 1,
				      sizeof(struct list_head),
				      GFP_KERNEL);
	if (!mm->free_list)
		return -ENOMEM;

	for (i = 0; i <= mm->max_order; ++i)
		INIT_LIST_HEAD(&mm->free_list[i]);

	/* Each set bit of size becomes one power-of-two root block. */
	mm->n_roots = hweight64(size);

	mm->roots = kmalloc_array(mm->n_roots,
				  sizeof(struct i915_buddy_block *),
				  GFP_KERNEL);
	if (!mm->roots)
		goto out_free_list;

	offset = 0;
	i = 0;

	/*
	 * Split into power-of-two blocks, in case we are given a size that is
	 * not itself a power-of-two.
	 */
	do {
		struct i915_buddy_block *root;
		unsigned int order;
		u64 root_size;

		/* Largest power-of-two slice still fitting in the remainder. */
		root_size = rounddown_pow_of_two(size);
		order = ilog2(root_size) - ilog2(chunk_size);

		root = i915_block_alloc(NULL, order, offset);
		if (!root)
			goto out_free_roots;

		mark_free(mm, root);

		GEM_BUG_ON(i > mm->max_order);
		GEM_BUG_ON(i915_buddy_block_size(mm, root) < chunk_size);

		mm->roots[i] = root;

		offset += root_size;
		size -= root_size;
		i++;
	} while (size);

	return 0;

out_free_roots:
	/* Unwind the roots created so far; the lists need no teardown. */
	while (i--)
		i915_block_free(mm->roots[i]);
	kfree(mm->roots);
out_free_list:
	kfree(mm->free_list);
	return -ENOMEM;
}
173*4882a593Smuzhiyun 
/*
 * Tear down a buddy allocator.  All allocations should have been freed
 * (every root back in the FREE state); a still-busy root is reported
 * via GEM_WARN_ON but its descriptor is released regardless.
 */
void i915_buddy_fini(struct i915_buddy_mm *mm)
{
	int i;

	for (i = 0; i < mm->n_roots; ++i) {
		GEM_WARN_ON(!i915_buddy_block_is_free(mm->roots[i]));
		i915_block_free(mm->roots[i]);
	}

	kfree(mm->roots);
	kfree(mm->free_list);
}
186*4882a593Smuzhiyun 
split_block(struct i915_buddy_mm * mm,struct i915_buddy_block * block)187*4882a593Smuzhiyun static int split_block(struct i915_buddy_mm *mm,
188*4882a593Smuzhiyun 		       struct i915_buddy_block *block)
189*4882a593Smuzhiyun {
190*4882a593Smuzhiyun 	unsigned int block_order = i915_buddy_block_order(block) - 1;
191*4882a593Smuzhiyun 	u64 offset = i915_buddy_block_offset(block);
192*4882a593Smuzhiyun 
193*4882a593Smuzhiyun 	GEM_BUG_ON(!i915_buddy_block_is_free(block));
194*4882a593Smuzhiyun 	GEM_BUG_ON(!i915_buddy_block_order(block));
195*4882a593Smuzhiyun 
196*4882a593Smuzhiyun 	block->left = i915_block_alloc(block, block_order, offset);
197*4882a593Smuzhiyun 	if (!block->left)
198*4882a593Smuzhiyun 		return -ENOMEM;
199*4882a593Smuzhiyun 
200*4882a593Smuzhiyun 	block->right = i915_block_alloc(block, block_order,
201*4882a593Smuzhiyun 					offset + (mm->chunk_size << block_order));
202*4882a593Smuzhiyun 	if (!block->right) {
203*4882a593Smuzhiyun 		i915_block_free(block->left);
204*4882a593Smuzhiyun 		return -ENOMEM;
205*4882a593Smuzhiyun 	}
206*4882a593Smuzhiyun 
207*4882a593Smuzhiyun 	mark_free(mm, block->left);
208*4882a593Smuzhiyun 	mark_free(mm, block->right);
209*4882a593Smuzhiyun 
210*4882a593Smuzhiyun 	mark_split(block);
211*4882a593Smuzhiyun 
212*4882a593Smuzhiyun 	return 0;
213*4882a593Smuzhiyun }
214*4882a593Smuzhiyun 
215*4882a593Smuzhiyun static struct i915_buddy_block *
get_buddy(struct i915_buddy_block * block)216*4882a593Smuzhiyun get_buddy(struct i915_buddy_block *block)
217*4882a593Smuzhiyun {
218*4882a593Smuzhiyun 	struct i915_buddy_block *parent;
219*4882a593Smuzhiyun 
220*4882a593Smuzhiyun 	parent = block->parent;
221*4882a593Smuzhiyun 	if (!parent)
222*4882a593Smuzhiyun 		return NULL;
223*4882a593Smuzhiyun 
224*4882a593Smuzhiyun 	if (parent->left == block)
225*4882a593Smuzhiyun 		return parent->right;
226*4882a593Smuzhiyun 
227*4882a593Smuzhiyun 	return parent->left;
228*4882a593Smuzhiyun }
229*4882a593Smuzhiyun 
/*
 * Release a block back to the allocator, eagerly coalescing with its
 * buddy on the way up towards the root.
 *
 * NOTE: @block must not be linked on any free list when passed in
 * (the merge step kmem_cache_frees it without a list_del, and the
 * terminal mark_free() re-adds the merged result unconditionally).
 */
static void __i915_buddy_free(struct i915_buddy_mm *mm,
			      struct i915_buddy_block *block)
{
	struct i915_buddy_block *parent;

	while ((parent = block->parent)) {
		struct i915_buddy_block *buddy;

		buddy = get_buddy(block);

		/* Buddy still allocated or split: merging stops here. */
		if (!i915_buddy_block_is_free(buddy))
			break;

		/* Take the buddy off its free list before freeing it. */
		list_del(&buddy->link);

		i915_block_free(block);
		i915_block_free(buddy);

		/* Continue merging one level up. */
		block = parent;
	}

	mark_free(mm, block);
}
253*4882a593Smuzhiyun 
/*
 * Free one block previously returned by i915_buddy_alloc().  The block
 * must be in the ALLOCATED state (and hence off the free lists).
 */
void i915_buddy_free(struct i915_buddy_mm *mm,
		     struct i915_buddy_block *block)
{
	GEM_BUG_ON(!i915_buddy_block_is_allocated(block));
	__i915_buddy_free(mm, block);
}
260*4882a593Smuzhiyun 
/*
 * Free every allocated block on @objects (linked via block->link) and
 * reset the list head.  cond_resched() between blocks keeps very long
 * lists from hogging the CPU.
 */
void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects)
{
	struct i915_buddy_block *block, *on;

	list_for_each_entry_safe(block, on, objects, link) {
		i915_buddy_free(mm, block);
		cond_resched();
	}
	INIT_LIST_HEAD(objects);
}
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun /*
273*4882a593Smuzhiyun  * Allocate power-of-two block. The order value here translates to:
274*4882a593Smuzhiyun  *
275*4882a593Smuzhiyun  *   0 = 2^0 * mm->chunk_size
276*4882a593Smuzhiyun  *   1 = 2^1 * mm->chunk_size
277*4882a593Smuzhiyun  *   2 = 2^2 * mm->chunk_size
278*4882a593Smuzhiyun  *   ...
279*4882a593Smuzhiyun  */
280*4882a593Smuzhiyun struct i915_buddy_block *
i915_buddy_alloc(struct i915_buddy_mm * mm,unsigned int order)281*4882a593Smuzhiyun i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
282*4882a593Smuzhiyun {
283*4882a593Smuzhiyun 	struct i915_buddy_block *block = NULL;
284*4882a593Smuzhiyun 	unsigned int i;
285*4882a593Smuzhiyun 	int err;
286*4882a593Smuzhiyun 
287*4882a593Smuzhiyun 	for (i = order; i <= mm->max_order; ++i) {
288*4882a593Smuzhiyun 		block = list_first_entry_or_null(&mm->free_list[i],
289*4882a593Smuzhiyun 						 struct i915_buddy_block,
290*4882a593Smuzhiyun 						 link);
291*4882a593Smuzhiyun 		if (block)
292*4882a593Smuzhiyun 			break;
293*4882a593Smuzhiyun 	}
294*4882a593Smuzhiyun 
295*4882a593Smuzhiyun 	if (!block)
296*4882a593Smuzhiyun 		return ERR_PTR(-ENOSPC);
297*4882a593Smuzhiyun 
298*4882a593Smuzhiyun 	GEM_BUG_ON(!i915_buddy_block_is_free(block));
299*4882a593Smuzhiyun 
300*4882a593Smuzhiyun 	while (i != order) {
301*4882a593Smuzhiyun 		err = split_block(mm, block);
302*4882a593Smuzhiyun 		if (unlikely(err))
303*4882a593Smuzhiyun 			goto out_free;
304*4882a593Smuzhiyun 
305*4882a593Smuzhiyun 		/* Go low */
306*4882a593Smuzhiyun 		block = block->left;
307*4882a593Smuzhiyun 		i--;
308*4882a593Smuzhiyun 	}
309*4882a593Smuzhiyun 
310*4882a593Smuzhiyun 	mark_allocated(block);
311*4882a593Smuzhiyun 	kmemleak_update_trace(block);
312*4882a593Smuzhiyun 	return block;
313*4882a593Smuzhiyun 
314*4882a593Smuzhiyun out_free:
315*4882a593Smuzhiyun 	if (i != order)
316*4882a593Smuzhiyun 		__i915_buddy_free(mm, block);
317*4882a593Smuzhiyun 	return ERR_PTR(err);
318*4882a593Smuzhiyun }
319*4882a593Smuzhiyun 
overlaps(u64 s1,u64 e1,u64 s2,u64 e2)320*4882a593Smuzhiyun static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
321*4882a593Smuzhiyun {
322*4882a593Smuzhiyun 	return s1 <= e2 && e1 >= s2;
323*4882a593Smuzhiyun }
324*4882a593Smuzhiyun 
contains(u64 s1,u64 e1,u64 s2,u64 e2)325*4882a593Smuzhiyun static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
326*4882a593Smuzhiyun {
327*4882a593Smuzhiyun 	return s1 <= s2 && e1 >= e2;
328*4882a593Smuzhiyun }
329*4882a593Smuzhiyun 
330*4882a593Smuzhiyun /*
331*4882a593Smuzhiyun  * Allocate range. Note that it's safe to chain together multiple alloc_ranges
332*4882a593Smuzhiyun  * with the same blocks list.
333*4882a593Smuzhiyun  *
334*4882a593Smuzhiyun  * Intended for pre-allocating portions of the address space, for example to
335*4882a593Smuzhiyun  * reserve a block for the initial framebuffer or similar, hence the expectation
336*4882a593Smuzhiyun  * here is that i915_buddy_alloc() is still the main vehicle for
337*4882a593Smuzhiyun  * allocations, so if that's not the case then the drm_mm range allocator is
338*4882a593Smuzhiyun  * probably a much better fit, and so you should probably go use that instead.
339*4882a593Smuzhiyun  */
int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
			   struct list_head *blocks,
			   u64 start, u64 size)
{
	struct i915_buddy_block *block;
	struct i915_buddy_block *buddy;
	LIST_HEAD(allocated);
	LIST_HEAD(dfs);
	u64 end;
	int err;
	int i;

	if (size < mm->chunk_size)
		return -EINVAL;

	/* Both the start and the size must be chunk aligned. */
	if (!IS_ALIGNED(size | start, mm->chunk_size))
		return -EINVAL;

	if (range_overflows(start, size, mm->size))
		return -EINVAL;

	/* Seed the depth-first walk with every root block. */
	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	end = start + size - 1;

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(&dfs,
						 struct i915_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		block_start = i915_buddy_block_offset(block);
		block_end = block_start + i915_buddy_block_size(mm, block) - 1;

		/* Prune subtrees entirely outside the requested range. */
		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (i915_buddy_block_is_allocated(block)) {
			err = -ENOSPC;
			goto err_free;
		}

		/* Block fully inside the range: claim it whole. */
		if (contains(start, end, block_start, block_end)) {
			if (!i915_buddy_block_is_free(block)) {
				err = -ENOSPC;
				goto err_free;
			}

			mark_allocated(block);
			list_add_tail(&block->link, &allocated);
			continue;
		}

		/* Partial overlap: descend into the (possibly new) children. */
		if (!i915_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		list_add(&block->right->tmp_link, &dfs);
		list_add(&block->left->tmp_link, &dfs);
	} while (1);

	list_splice_tail(&allocated, blocks);
	return 0;

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = get_buddy(block);
	if (buddy &&
	    (i915_buddy_block_is_free(block) &&
	     i915_buddy_block_is_free(buddy))) {
		/*
		 * The block whose split just failed is still FREE and still
		 * linked on its free list, but __i915_buddy_free() assumes
		 * an unlinked block (it frees the block while merging and
		 * re-adds the merged result via mark_free()).  Unlink it
		 * first so the free list never references freed memory.
		 */
		list_del(&block->link);
		__i915_buddy_free(mm, block);
	}

err_free:
	i915_buddy_free_list(mm, &allocated);
	return err;
}
429*4882a593Smuzhiyun 
430*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
431*4882a593Smuzhiyun #include "selftests/i915_buddy.c"
432*4882a593Smuzhiyun #endif
433