xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/i915/selftests/i915_buddy.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: MIT
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright © 2019 Intel Corporation
4*4882a593Smuzhiyun  */
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun #include <linux/prime_numbers.h>
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include "../i915_selftest.h"
9*4882a593Smuzhiyun #include "i915_random.h"
10*4882a593Smuzhiyun 
/* Emit the diagnostic state of one block; @buddy flags whether this is
 * the buddy of the block under scrutiny rather than the block itself.
 */
static void __igt_dump_block(struct i915_buddy_mm *mm,
			     struct i915_buddy_block *block,
			     bool buddy)
{
	const u64 offset = i915_buddy_block_offset(block);
	const u64 size = i915_buddy_block_size(mm, block);

	pr_err("block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%s buddy=%s\n",
	       block->header,
	       i915_buddy_block_state(block),
	       i915_buddy_block_order(block),
	       offset,
	       size,
	       yesno(!block->parent),
	       yesno(buddy));
}
24*4882a593Smuzhiyun 
igt_dump_block(struct i915_buddy_mm * mm,struct i915_buddy_block * block)25*4882a593Smuzhiyun static void igt_dump_block(struct i915_buddy_mm *mm,
26*4882a593Smuzhiyun 			   struct i915_buddy_block *block)
27*4882a593Smuzhiyun {
28*4882a593Smuzhiyun 	struct i915_buddy_block *buddy;
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun 	__igt_dump_block(mm, block, false);
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun 	buddy = get_buddy(block);
33*4882a593Smuzhiyun 	if (buddy)
34*4882a593Smuzhiyun 		__igt_dump_block(mm, buddy, true);
35*4882a593Smuzhiyun }
36*4882a593Smuzhiyun 
igt_check_block(struct i915_buddy_mm * mm,struct i915_buddy_block * block)37*4882a593Smuzhiyun static int igt_check_block(struct i915_buddy_mm *mm,
38*4882a593Smuzhiyun 			   struct i915_buddy_block *block)
39*4882a593Smuzhiyun {
40*4882a593Smuzhiyun 	struct i915_buddy_block *buddy;
41*4882a593Smuzhiyun 	unsigned int block_state;
42*4882a593Smuzhiyun 	u64 block_size;
43*4882a593Smuzhiyun 	u64 offset;
44*4882a593Smuzhiyun 	int err = 0;
45*4882a593Smuzhiyun 
46*4882a593Smuzhiyun 	block_state = i915_buddy_block_state(block);
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun 	if (block_state != I915_BUDDY_ALLOCATED &&
49*4882a593Smuzhiyun 	    block_state != I915_BUDDY_FREE &&
50*4882a593Smuzhiyun 	    block_state != I915_BUDDY_SPLIT) {
51*4882a593Smuzhiyun 		pr_err("block state mismatch\n");
52*4882a593Smuzhiyun 		err = -EINVAL;
53*4882a593Smuzhiyun 	}
54*4882a593Smuzhiyun 
55*4882a593Smuzhiyun 	block_size = i915_buddy_block_size(mm, block);
56*4882a593Smuzhiyun 	offset = i915_buddy_block_offset(block);
57*4882a593Smuzhiyun 
58*4882a593Smuzhiyun 	if (block_size < mm->chunk_size) {
59*4882a593Smuzhiyun 		pr_err("block size smaller than min size\n");
60*4882a593Smuzhiyun 		err = -EINVAL;
61*4882a593Smuzhiyun 	}
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun 	if (!is_power_of_2(block_size)) {
64*4882a593Smuzhiyun 		pr_err("block size not power of two\n");
65*4882a593Smuzhiyun 		err = -EINVAL;
66*4882a593Smuzhiyun 	}
67*4882a593Smuzhiyun 
68*4882a593Smuzhiyun 	if (!IS_ALIGNED(block_size, mm->chunk_size)) {
69*4882a593Smuzhiyun 		pr_err("block size not aligned to min size\n");
70*4882a593Smuzhiyun 		err = -EINVAL;
71*4882a593Smuzhiyun 	}
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun 	if (!IS_ALIGNED(offset, mm->chunk_size)) {
74*4882a593Smuzhiyun 		pr_err("block offset not aligned to min size\n");
75*4882a593Smuzhiyun 		err = -EINVAL;
76*4882a593Smuzhiyun 	}
77*4882a593Smuzhiyun 
78*4882a593Smuzhiyun 	if (!IS_ALIGNED(offset, block_size)) {
79*4882a593Smuzhiyun 		pr_err("block offset not aligned to block size\n");
80*4882a593Smuzhiyun 		err = -EINVAL;
81*4882a593Smuzhiyun 	}
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun 	buddy = get_buddy(block);
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun 	if (!buddy && block->parent) {
86*4882a593Smuzhiyun 		pr_err("buddy has gone fishing\n");
87*4882a593Smuzhiyun 		err = -EINVAL;
88*4882a593Smuzhiyun 	}
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun 	if (buddy) {
91*4882a593Smuzhiyun 		if (i915_buddy_block_offset(buddy) != (offset ^ block_size)) {
92*4882a593Smuzhiyun 			pr_err("buddy has wrong offset\n");
93*4882a593Smuzhiyun 			err = -EINVAL;
94*4882a593Smuzhiyun 		}
95*4882a593Smuzhiyun 
96*4882a593Smuzhiyun 		if (i915_buddy_block_size(mm, buddy) != block_size) {
97*4882a593Smuzhiyun 			pr_err("buddy size mismatch\n");
98*4882a593Smuzhiyun 			err = -EINVAL;
99*4882a593Smuzhiyun 		}
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun 		if (i915_buddy_block_state(buddy) == block_state &&
102*4882a593Smuzhiyun 		    block_state == I915_BUDDY_FREE) {
103*4882a593Smuzhiyun 			pr_err("block and its buddy are free\n");
104*4882a593Smuzhiyun 			err = -EINVAL;
105*4882a593Smuzhiyun 		}
106*4882a593Smuzhiyun 	}
107*4882a593Smuzhiyun 
108*4882a593Smuzhiyun 	return err;
109*4882a593Smuzhiyun }
110*4882a593Smuzhiyun 
/*
 * Validate a list of allocated blocks: each entry must pass
 * igt_check_block() and be in the ALLOCATED state, the sizes must sum
 * to @expected_size, and when @is_contiguous is set each block must
 * begin exactly where its predecessor ended. On failure the offending
 * block (and its predecessor, if any) are dumped for diagnosis.
 *
 * Returns 0 on success, or the first error encountered (-EINVAL).
 */
static int igt_check_blocks(struct i915_buddy_mm *mm,
			    struct list_head *blocks,
			    u64 expected_size,
			    bool is_contiguous)
{
	struct i915_buddy_block *block;
	struct i915_buddy_block *prev;
	u64 total;
	int err = 0;

	block = NULL;
	prev = NULL;
	total = 0;

	list_for_each_entry(block, blocks, link) {
		err = igt_check_block(mm, block);

		if (!i915_buddy_block_is_allocated(block)) {
			pr_err("block not allocated\n");
			err = -EINVAL;
		}

		if (is_contiguous && prev) {
			u64 prev_block_size;
			u64 prev_offset;
			u64 offset;

			prev_offset = i915_buddy_block_offset(prev);
			prev_block_size = i915_buddy_block_size(mm, prev);
			offset = i915_buddy_block_offset(block);

			/* Each block must butt up against the previous one */
			if (offset != (prev_offset + prev_block_size)) {
				pr_err("block offset mismatch\n");
				err = -EINVAL;
			}
		}

		if (err)
			break;

		total += i915_buddy_block_size(mm, block);
		prev = block;
	}

	if (!err) {
		if (total != expected_size) {
			pr_err("size mismatch, expected=%llx, found=%llx\n",
			       expected_size, total);
			err = -EINVAL;
		}
		return err;
	}

	if (prev) {
		pr_err("prev block, dump:\n");
		igt_dump_block(mm, prev);
	}

	if (block) {
		pr_err("bad block, dump:\n");
		igt_dump_block(mm, block);
	}

	return err;
}
176*4882a593Smuzhiyun 
/*
 * Validate the global invariants of a fully-free buddy mm: one root
 * per set bit of mm->size, each root a valid free block heading its
 * order's free list, roots laid out contiguously largest-first, and
 * root sizes summing to mm->size. Intended to be run before and after
 * each subtest, when nothing should be allocated.
 *
 * Returns 0 on success, -EINVAL on any violation; the offending root
 * (and its predecessor) are dumped for diagnosis.
 */
static int igt_check_mm(struct i915_buddy_mm *mm)
{
	struct i915_buddy_block *root;
	struct i915_buddy_block *prev;
	unsigned int i;
	u64 total;
	int err = 0;

	if (!mm->n_roots) {
		pr_err("n_roots is zero\n");
		return -EINVAL;
	}

	/* The mm is a forest of pot-sized roots: one per set bit of size */
	if (mm->n_roots != hweight64(mm->size)) {
		pr_err("n_roots mismatch, n_roots=%u, expected=%lu\n",
		       mm->n_roots, hweight64(mm->size));
		return -EINVAL;
	}

	root = NULL;
	prev = NULL;
	total = 0;

	for (i = 0; i < mm->n_roots; ++i) {
		struct i915_buddy_block *block;
		unsigned int order;

		root = mm->roots[i];
		if (!root) {
			pr_err("root(%u) is NULL\n", i);
			err = -EINVAL;
			break;
		}

		err = igt_check_block(mm, root);

		/* Nothing is allocated, so every root must be free */
		if (!i915_buddy_block_is_free(root)) {
			pr_err("root not free\n");
			err = -EINVAL;
		}

		order = i915_buddy_block_order(root);

		/* Roots are stored in descending order: largest first */
		if (!i) {
			if (order != mm->max_order) {
				pr_err("max order root missing\n");
				err = -EINVAL;
			}
		}

		/* Consecutive roots must tile the address space exactly */
		if (prev) {
			u64 prev_block_size;
			u64 prev_offset;
			u64 offset;

			prev_offset = i915_buddy_block_offset(prev);
			prev_block_size = i915_buddy_block_size(mm, prev);
			offset = i915_buddy_block_offset(root);

			if (offset != (prev_offset + prev_block_size)) {
				pr_err("root offset mismatch\n");
				err = -EINVAL;
			}
		}

		/* With everything free, each root heads its free list */
		block = list_first_entry_or_null(&mm->free_list[order],
						 struct i915_buddy_block,
						 link);
		if (block != root) {
			pr_err("root mismatch at order=%u\n", order);
			err = -EINVAL;
		}

		if (err)
			break;

		prev = root;
		total += i915_buddy_block_size(mm, root);
	}

	if (!err) {
		if (total != mm->size) {
			pr_err("expected mm size=%llx, found=%llx\n", mm->size,
			       total);
			err = -EINVAL;
		}
		return err;
	}

	if (prev) {
		pr_err("prev root(%u), dump:\n", i - 1);
		igt_dump_block(mm, prev);
	}

	if (root) {
		pr_err("bad root(%u), dump:\n", i);
		igt_dump_block(mm, root);
	}

	return err;
}
278*4882a593Smuzhiyun 
/*
 * Pick a pseudo-random mm size and chunk size (both in bytes) for the
 * tests, seeded from i915_selftest.random_seed for reproducibility.
 */
static void igt_mm_config(u64 *size, u64 *chunk_size)
{
	I915_RND_STATE(prng);
	u32 pages, chunk_pages;

	/* Nothing fancy, just try to get an interesting bit pattern */

	prandom_seed_state(&prng, i915_selftest.random_seed);

	/* Let size be a random number of pages up to 8 GB (2M pages) */
	pages = 1 + i915_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng);
	/* Let the chunk size be a random power of 2 less than size */
	chunk_pages = BIT(i915_prandom_u32_max_state(ilog2(pages), &prng));
	/* Round size down to the chunk size */
	pages &= -chunk_pages;

	/* Convert from pages to bytes */
	*chunk_size = (u64)chunk_pages << 12;
	*size = (u64)pages << 12;
}
299*4882a593Smuzhiyun 
igt_buddy_alloc_smoke(void * arg)300*4882a593Smuzhiyun static int igt_buddy_alloc_smoke(void *arg)
301*4882a593Smuzhiyun {
302*4882a593Smuzhiyun 	struct i915_buddy_mm mm;
303*4882a593Smuzhiyun 	IGT_TIMEOUT(end_time);
304*4882a593Smuzhiyun 	I915_RND_STATE(prng);
305*4882a593Smuzhiyun 	u64 chunk_size;
306*4882a593Smuzhiyun 	u64 mm_size;
307*4882a593Smuzhiyun 	int *order;
308*4882a593Smuzhiyun 	int err, i;
309*4882a593Smuzhiyun 
310*4882a593Smuzhiyun 	igt_mm_config(&mm_size, &chunk_size);
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun 	pr_info("buddy_init with size=%llx, chunk_size=%llx\n", mm_size, chunk_size);
313*4882a593Smuzhiyun 
314*4882a593Smuzhiyun 	err = i915_buddy_init(&mm, mm_size, chunk_size);
315*4882a593Smuzhiyun 	if (err) {
316*4882a593Smuzhiyun 		pr_err("buddy_init failed(%d)\n", err);
317*4882a593Smuzhiyun 		return err;
318*4882a593Smuzhiyun 	}
319*4882a593Smuzhiyun 
320*4882a593Smuzhiyun 	order = i915_random_order(mm.max_order + 1, &prng);
321*4882a593Smuzhiyun 	if (!order)
322*4882a593Smuzhiyun 		goto out_fini;
323*4882a593Smuzhiyun 
324*4882a593Smuzhiyun 	for (i = 0; i <= mm.max_order; ++i) {
325*4882a593Smuzhiyun 		struct i915_buddy_block *block;
326*4882a593Smuzhiyun 		int max_order = order[i];
327*4882a593Smuzhiyun 		bool timeout = false;
328*4882a593Smuzhiyun 		LIST_HEAD(blocks);
329*4882a593Smuzhiyun 		int order;
330*4882a593Smuzhiyun 		u64 total;
331*4882a593Smuzhiyun 
332*4882a593Smuzhiyun 		err = igt_check_mm(&mm);
333*4882a593Smuzhiyun 		if (err) {
334*4882a593Smuzhiyun 			pr_err("pre-mm check failed, abort\n");
335*4882a593Smuzhiyun 			break;
336*4882a593Smuzhiyun 		}
337*4882a593Smuzhiyun 
338*4882a593Smuzhiyun 		pr_info("filling from max_order=%u\n", max_order);
339*4882a593Smuzhiyun 
340*4882a593Smuzhiyun 		order = max_order;
341*4882a593Smuzhiyun 		total = 0;
342*4882a593Smuzhiyun 
343*4882a593Smuzhiyun 		do {
344*4882a593Smuzhiyun retry:
345*4882a593Smuzhiyun 			block = i915_buddy_alloc(&mm, order);
346*4882a593Smuzhiyun 			if (IS_ERR(block)) {
347*4882a593Smuzhiyun 				err = PTR_ERR(block);
348*4882a593Smuzhiyun 				if (err == -ENOMEM) {
349*4882a593Smuzhiyun 					pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
350*4882a593Smuzhiyun 						order);
351*4882a593Smuzhiyun 				} else {
352*4882a593Smuzhiyun 					if (order--) {
353*4882a593Smuzhiyun 						err = 0;
354*4882a593Smuzhiyun 						goto retry;
355*4882a593Smuzhiyun 					}
356*4882a593Smuzhiyun 
357*4882a593Smuzhiyun 					pr_err("buddy_alloc with order=%d failed(%d)\n",
358*4882a593Smuzhiyun 					       order, err);
359*4882a593Smuzhiyun 				}
360*4882a593Smuzhiyun 
361*4882a593Smuzhiyun 				break;
362*4882a593Smuzhiyun 			}
363*4882a593Smuzhiyun 
364*4882a593Smuzhiyun 			list_add_tail(&block->link, &blocks);
365*4882a593Smuzhiyun 
366*4882a593Smuzhiyun 			if (i915_buddy_block_order(block) != order) {
367*4882a593Smuzhiyun 				pr_err("buddy_alloc order mismatch\n");
368*4882a593Smuzhiyun 				err = -EINVAL;
369*4882a593Smuzhiyun 				break;
370*4882a593Smuzhiyun 			}
371*4882a593Smuzhiyun 
372*4882a593Smuzhiyun 			total += i915_buddy_block_size(&mm, block);
373*4882a593Smuzhiyun 
374*4882a593Smuzhiyun 			if (__igt_timeout(end_time, NULL)) {
375*4882a593Smuzhiyun 				timeout = true;
376*4882a593Smuzhiyun 				break;
377*4882a593Smuzhiyun 			}
378*4882a593Smuzhiyun 		} while (total < mm.size);
379*4882a593Smuzhiyun 
380*4882a593Smuzhiyun 		if (!err)
381*4882a593Smuzhiyun 			err = igt_check_blocks(&mm, &blocks, total, false);
382*4882a593Smuzhiyun 
383*4882a593Smuzhiyun 		i915_buddy_free_list(&mm, &blocks);
384*4882a593Smuzhiyun 
385*4882a593Smuzhiyun 		if (!err) {
386*4882a593Smuzhiyun 			err = igt_check_mm(&mm);
387*4882a593Smuzhiyun 			if (err)
388*4882a593Smuzhiyun 				pr_err("post-mm check failed\n");
389*4882a593Smuzhiyun 		}
390*4882a593Smuzhiyun 
391*4882a593Smuzhiyun 		if (err || timeout)
392*4882a593Smuzhiyun 			break;
393*4882a593Smuzhiyun 
394*4882a593Smuzhiyun 		cond_resched();
395*4882a593Smuzhiyun 	}
396*4882a593Smuzhiyun 
397*4882a593Smuzhiyun 	if (err == -ENOMEM)
398*4882a593Smuzhiyun 		err = 0;
399*4882a593Smuzhiyun 
400*4882a593Smuzhiyun 	kfree(order);
401*4882a593Smuzhiyun out_fini:
402*4882a593Smuzhiyun 	i915_buddy_fini(&mm);
403*4882a593Smuzhiyun 
404*4882a593Smuzhiyun 	return err;
405*4882a593Smuzhiyun }
406*4882a593Smuzhiyun 
/*
 * Worst-case carve-up: fill a pot-sized mm completely, one block of
 * each order, then verify it can be reassembled into a single
 * max_order block by freeing in increasing size.
 */
static int igt_buddy_alloc_pessimistic(void *arg)
{
	const unsigned int max_order = 16;
	struct i915_buddy_block *block, *bn;
	struct i915_buddy_mm mm;
	unsigned int order;
	LIST_HEAD(blocks);
	int err;

	/*
	 * Create a pot-sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left.
	 */

	err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
	if (err) {
		pr_err("buddy_init failed(%d)\n", err);
		return err;
	}
	GEM_BUG_ON(mm.max_order != max_order);

	/* One block of each order 0..max_order-1: all but one page */
	for (order = 0; order < max_order; order++) {
		block = i915_buddy_alloc(&mm, order);
		if (IS_ERR(block)) {
			pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
				order);
			err = PTR_ERR(block);
			goto err;
		}

		list_add_tail(&block->link, &blocks);
	}

	/* And now the last remaining block available */
	block = i915_buddy_alloc(&mm, 0);
	if (IS_ERR(block)) {
		pr_info("buddy_alloc hit -ENOMEM on final alloc\n");
		err = PTR_ERR(block);
		goto err;
	}
	list_add_tail(&block->link, &blocks);

	/* Should be completely full! Any further allocation must fail. */
	for (order = max_order; order--; ) {
		block = i915_buddy_alloc(&mm, order);
		if (!IS_ERR(block)) {
			pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
				order);
			list_add_tail(&block->link, &blocks);
			err = -EINVAL;
			goto err;
		}
	}

	/* Drop the extra single-page block we appended above */
	block = list_last_entry(&blocks, typeof(*block), link);
	list_del(&block->link);
	i915_buddy_free(&mm, block);

	/* As we free in increasing size, we make available larger blocks */
	order = 1;
	list_for_each_entry_safe(block, bn, &blocks, link) {
		list_del(&block->link);
		i915_buddy_free(&mm, block);

		/* Freed blocks merge upwards, so order N should now fit */
		block = i915_buddy_alloc(&mm, order);
		if (IS_ERR(block)) {
			pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
				order);
			err = PTR_ERR(block);
			goto err;
		}
		i915_buddy_free(&mm, block);
		order++;
	}

	/* To confirm, now the whole mm should be available */
	block = i915_buddy_alloc(&mm, max_order);
	if (IS_ERR(block)) {
		pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
			max_order);
		err = PTR_ERR(block);
		goto err;
	}
	i915_buddy_free(&mm, block);

err:
	i915_buddy_free_list(&mm, &blocks);
	i915_buddy_fini(&mm);
	return err;
}
498*4882a593Smuzhiyun 
igt_buddy_alloc_optimistic(void * arg)499*4882a593Smuzhiyun static int igt_buddy_alloc_optimistic(void *arg)
500*4882a593Smuzhiyun {
501*4882a593Smuzhiyun 	const int max_order = 16;
502*4882a593Smuzhiyun 	struct i915_buddy_block *block;
503*4882a593Smuzhiyun 	struct i915_buddy_mm mm;
504*4882a593Smuzhiyun 	LIST_HEAD(blocks);
505*4882a593Smuzhiyun 	int order;
506*4882a593Smuzhiyun 	int err;
507*4882a593Smuzhiyun 
508*4882a593Smuzhiyun 	/*
509*4882a593Smuzhiyun 	 * Create a mm with one block of each order available, and
510*4882a593Smuzhiyun 	 * try to allocate them all.
511*4882a593Smuzhiyun 	 */
512*4882a593Smuzhiyun 
513*4882a593Smuzhiyun 	err = i915_buddy_init(&mm,
514*4882a593Smuzhiyun 			      PAGE_SIZE * ((1 << (max_order + 1)) - 1),
515*4882a593Smuzhiyun 			      PAGE_SIZE);
516*4882a593Smuzhiyun 	if (err) {
517*4882a593Smuzhiyun 		pr_err("buddy_init failed(%d)\n", err);
518*4882a593Smuzhiyun 		return err;
519*4882a593Smuzhiyun 	}
520*4882a593Smuzhiyun 	GEM_BUG_ON(mm.max_order != max_order);
521*4882a593Smuzhiyun 
522*4882a593Smuzhiyun 	for (order = 0; order <= max_order; order++) {
523*4882a593Smuzhiyun 		block = i915_buddy_alloc(&mm, order);
524*4882a593Smuzhiyun 		if (IS_ERR(block)) {
525*4882a593Smuzhiyun 			pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
526*4882a593Smuzhiyun 				order);
527*4882a593Smuzhiyun 			err = PTR_ERR(block);
528*4882a593Smuzhiyun 			goto err;
529*4882a593Smuzhiyun 		}
530*4882a593Smuzhiyun 
531*4882a593Smuzhiyun 		list_add_tail(&block->link, &blocks);
532*4882a593Smuzhiyun 	}
533*4882a593Smuzhiyun 
534*4882a593Smuzhiyun 	/* Should be completely full! */
535*4882a593Smuzhiyun 	block = i915_buddy_alloc(&mm, 0);
536*4882a593Smuzhiyun 	if (!IS_ERR(block)) {
537*4882a593Smuzhiyun 		pr_info("buddy_alloc unexpectedly succeeded, it should be full!");
538*4882a593Smuzhiyun 		list_add_tail(&block->link, &blocks);
539*4882a593Smuzhiyun 		err = -EINVAL;
540*4882a593Smuzhiyun 		goto err;
541*4882a593Smuzhiyun 	}
542*4882a593Smuzhiyun 
543*4882a593Smuzhiyun err:
544*4882a593Smuzhiyun 	i915_buddy_free_list(&mm, &blocks);
545*4882a593Smuzhiyun 	i915_buddy_fini(&mm);
546*4882a593Smuzhiyun 	return err;
547*4882a593Smuzhiyun }
548*4882a593Smuzhiyun 
/*
 * Fragmentation stress: repeatedly carve the mm down to single-page
 * holes so that, once the holes are pinned, no allocation larger than
 * chunk_size can succeed anywhere.
 */
static int igt_buddy_alloc_pathological(void *arg)
{
	const int max_order = 16;
	struct i915_buddy_block *block;
	struct i915_buddy_mm mm;
	LIST_HEAD(blocks);
	LIST_HEAD(holes);
	int order, top;
	int err;

	/*
	 * Create a pot-sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left. Free the largest block, then whittle down again.
	 * Eventually we will have a fully 50% fragmented mm.
	 */

	err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
	if (err) {
		pr_err("buddy_init failed(%d)\n", err);
		return err;
	}
	GEM_BUG_ON(mm.max_order != max_order);

	for (top = max_order; top; top--) {
		/* Make room by freeing the largest allocated block */
		block = list_first_entry_or_null(&blocks, typeof(*block), link);
		if (block) {
			list_del(&block->link);
			i915_buddy_free(&mm, block);
		}

		/* Re-fill the freed space with one block of each lower order */
		for (order = top; order--; ) {
			block = i915_buddy_alloc(&mm, order);
			if (IS_ERR(block)) {
				pr_info("buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
					order, top);
				err = PTR_ERR(block);
				goto err;
			}
			list_add_tail(&block->link, &blocks);
		}

		/* There should be one final page for this sub-allocation */
		block = i915_buddy_alloc(&mm, 0);
		if (IS_ERR(block)) {
			pr_info("buddy_alloc hit -ENOMEM for hole\n");
			err = PTR_ERR(block);
			goto err;
		}
		/* Keep the hole pinned so it can never merge back upwards */
		list_add_tail(&block->link, &holes);

		block = i915_buddy_alloc(&mm, top);
		if (!IS_ERR(block)) {
			pr_info("buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
				top, max_order);
			list_add_tail(&block->link, &blocks);
			err = -EINVAL;
			goto err;
		}
	}

	/* Release the pinned pages; their buddies are still allocated */
	i915_buddy_free_list(&mm, &holes);

	/* Nothing larger than blocks of chunk_size now available */
	for (order = 1; order <= max_order; order++) {
		block = i915_buddy_alloc(&mm, order);
		if (!IS_ERR(block)) {
			pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
				order);
			list_add_tail(&block->link, &blocks);
			err = -EINVAL;
			goto err;
		}
	}

err:
	list_splice_tail(&holes, &blocks);
	i915_buddy_free_list(&mm, &blocks);
	i915_buddy_fini(&mm);
	return err;
}
631*4882a593Smuzhiyun 
/*
 * Exercise i915_buddy_alloc_range(): walk the whole mm front to back
 * in prime-number-of-pages sized chunks, checking that each returned
 * span starts at the requested offset, is contiguous, and sums to the
 * requested size; afterwards the mm must return to its pristine state.
 */
static int igt_buddy_alloc_range(void *arg)
{
	struct i915_buddy_mm mm;
	unsigned long page_num;
	LIST_HEAD(blocks);
	u64 chunk_size;
	u64 offset;
	u64 size;
	u64 rem;
	int err;

	igt_mm_config(&size, &chunk_size);

	pr_info("buddy_init with size=%llx, chunk_size=%llx\n", size, chunk_size);

	err = i915_buddy_init(&mm, size, chunk_size);
	if (err) {
		pr_err("buddy_init failed(%d)\n", err);
		return err;
	}

	err = igt_check_mm(&mm);
	if (err) {
		pr_err("pre-mm check failed, abort, abort, abort!\n");
		goto err_fini;
	}

	rem = mm.size;
	offset = 0;

	/* Primes give an irregular chunking pattern across the range */
	for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) {
		struct i915_buddy_block *block;
		LIST_HEAD(tmp);

		/* Clamp the final request to whatever is left */
		size = min(page_num * mm.chunk_size, rem);

		err = i915_buddy_alloc_range(&mm, &tmp, offset, size);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("alloc_range hit -ENOMEM with size=%llx\n",
					size);
			} else {
				pr_err("alloc_range with offset=%llx, size=%llx failed(%d)\n",
				       offset, size, err);
			}

			break;
		}

		block = list_first_entry_or_null(&tmp,
						 struct i915_buddy_block,
						 link);
		if (!block) {
			pr_err("alloc_range has no blocks\n");
			err = -EINVAL;
			break;
		}

		/* The first block must begin exactly where we asked */
		if (i915_buddy_block_offset(block) != offset) {
			pr_err("alloc_range start offset mismatch, found=%llx, expected=%llx\n",
			       i915_buddy_block_offset(block), offset);
			err = -EINVAL;
		}

		if (!err)
			err = igt_check_blocks(&mm, &tmp, size, true);

		list_splice_tail(&tmp, &blocks);

		if (err)
			break;

		offset += size;

		rem -= size;
		if (!rem)
			break;

		cond_resched();
	}

	/* Running out of space is an acceptable way to finish */
	if (err == -ENOMEM)
		err = 0;

	i915_buddy_free_list(&mm, &blocks);

	if (!err) {
		err = igt_check_mm(&mm);
		if (err)
			pr_err("post-mm check failed\n");
	}

err_fini:
	i915_buddy_fini(&mm);

	return err;
}
729*4882a593Smuzhiyun 
/*
 * Entry point for the buddy allocator mock selftests; runs the
 * subtests in declaration order with no i915 device (NULL argument).
 */
int i915_buddy_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_buddy_alloc_pessimistic),
		SUBTEST(igt_buddy_alloc_optimistic),
		SUBTEST(igt_buddy_alloc_pathological),
		SUBTEST(igt_buddy_alloc_smoke),
		SUBTEST(igt_buddy_alloc_range),
	};

	return i915_subtests(tests, NULL);
}
742