/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/kref.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"

#define TEST_OA_CONFIG_UUID "12345678-1234-1234-1234-1234567890ab"

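/*
 * Register an empty OA config under TEST_OA_CONFIG_UUID so the selftests
 * can open a perf stream without programming a real metric set.
 */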
static int
alloc_empty_config(struct i915_perf *perf)
{
	struct i915_oa_config *oa_config;

	oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
	if (!oa_config)
		return -ENOMEM;

	oa_config->perf = perf;
	kref_init(&oa_config->ref);

	strlcpy(oa_config->uuid, TEST_OA_CONFIG_UUID, sizeof(oa_config->uuid));

	mutex_lock(&perf->metrics_lock);

	oa_config->id = idr_alloc(&perf->metrics_idr, oa_config, 2, 0, GFP_KERNEL);
	if (oa_config->id < 0) {
		mutex_unlock(&perf->metrics_lock);
		i915_oa_config_put(oa_config);
		return -ENOMEM;
	}

	mutex_unlock(&perf->metrics_lock);

	return 0;
}

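/*
 * Look up the empty config by UUID, unregister it from the metrics idr
 * and drop the reference taken in alloc_empty_config().
 */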
static void
destroy_empty_config(struct i915_perf *perf)
{
	struct i915_oa_config *oa_config = NULL, *tmp;
	int id;

	mutex_lock(&perf->metrics_lock);

	idr_for_each_entry(&perf->metrics_idr, tmp, id) {
		if (!strcmp(tmp->uuid, TEST_OA_CONFIG_UUID)) {
			oa_config = tmp;
			break;
		}
	}

	if (oa_config)
		idr_remove(&perf->metrics_idr, oa_config->id);

	mutex_unlock(&perf->metrics_lock);

	if (oa_config)
		i915_oa_config_put(oa_config);
}

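/*
 * Find the empty config in the metrics idr and return a new reference
 * to it, or NULL if it has not been registered.
 */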
static struct i915_oa_config *
get_empty_config(struct i915_perf *perf)
{
	struct i915_oa_config *oa_config = NULL, *tmp;
	int id;

	mutex_lock(&perf->metrics_lock);

	idr_for_each_entry(&perf->metrics_idr, tmp, id) {
		if (!strcmp(tmp->uuid, TEST_OA_CONFIG_UUID)) {
			oa_config = i915_oa_config_get(tmp);
			break;
		}
	}

	mutex_unlock(&perf->metrics_lock);

	return oa_config;
}

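/*
 * Open an OA stream on the render engine using the empty config. The
 * stream is created directly via i915_oa_stream_init() rather than
 * through the perf ioctl interface.
 */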
static struct i915_perf_stream *
test_stream(struct i915_perf *perf)
{
	struct drm_i915_perf_open_param param = {};
	struct i915_oa_config *oa_config = get_empty_config(perf);
	struct perf_open_properties props = {
		.engine = intel_engine_lookup_user(perf->i915,
						   I915_ENGINE_CLASS_RENDER,
						   0),
		.sample_flags = SAMPLE_OA_REPORT,
		.oa_format = IS_GEN(perf->i915, 12) ?
		I915_OA_FORMAT_A32u40_A4u32_B8_C8 : I915_OA_FORMAT_C4_B8,
	};
	struct i915_perf_stream *stream;

	if (!oa_config)
		return NULL;

	props.metrics_set = oa_config->id;

	stream = kzalloc(sizeof(*stream), GFP_KERNEL);
	if (!stream) {
		i915_oa_config_put(oa_config);
		return NULL;
	}

	stream->perf = perf;

	mutex_lock(&perf->lock);
	if (i915_oa_stream_init(stream, &param, &props)) {
		kfree(stream);
		stream = NULL;
	}
	mutex_unlock(&perf->lock);

	i915_oa_config_put(oa_config);

	return stream;
}

static void stream_destroy(struct i915_perf_stream *stream)
{
	struct i915_perf *perf = stream->perf;

	mutex_lock(&perf->lock);
	i915_perf_destroy_locked(stream);
	mutex_unlock(&perf->lock);
}

static int live_sanitycheck(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_perf_stream *stream;

	/* Quick check we can create a perf stream */

	stream = test_stream(&i915->perf);
	if (!stream)
		return -EINVAL;

	stream_destroy(stream);
	return 0;
}

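/*
 * Emit a PIPE_CONTROL that writes the CS timestamp into the engine's
 * status page at the given dword slot.
 */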
static int write_timestamp(struct i915_request *rq, int slot)
{
	u32 *cs;
	int len;

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

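	/*
	 * PIPE_CONTROL is 5 dwords before gen8 and 6 dwords from gen8 on;
	 * we always emit 6 dwords, so on older gens the trailing zero is
	 * parsed as an MI_NOOP.
	 */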
	len = 5;
	if (INTEL_GEN(rq->engine->i915) >= 8)
		len++;

	*cs++ = GFX_OP_PIPE_CONTROL(len);
	*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB |
		PIPE_CONTROL_STORE_DATA_INDEX |
		PIPE_CONTROL_WRITE_TIMESTAMP;
	*cs++ = slot * sizeof(u32);
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;

	intel_ring_advance(rq, cs);

	return 0;
}

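/*
 * Busy-wait on the CPU until the GPU writes a non-zero value into the
 * status page slot (or the request completes), then sample the time.
 */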
static ktime_t poll_status(struct i915_request *rq, int slot)
{
	while (!intel_read_status_page(rq->engine, slot) &&
	       !i915_request_completed(rq))
		cpu_relax();

	return ktime_get();
}

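/*
 * Bracket the noa_wait batch with two timestamp writes and check that
 * the measured GPU delay matches the programmed noa_programming_delay.
 */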
static int live_noa_delay(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_perf_stream *stream;
	struct i915_request *rq;
	ktime_t t0, t1;
	u64 expected;
	u32 delay;
	int err;
	int i;

	/* Check that the GPU delay matches expectations */

	stream = test_stream(&i915->perf);
	if (!stream)
		return -ENOMEM;

	expected = atomic64_read(&stream->perf->noa_programming_delay);

	if (stream->engine->class != RENDER_CLASS) {
		err = -ENODEV;
		goto out;
	}

	for (i = 0; i < 4; i++)
		intel_write_status_page(stream->engine, 0x100 + i, 0);

	rq = intel_engine_create_kernel_request(stream->engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out;
	}

	if (rq->engine->emit_init_breadcrumb) {
		err = rq->engine->emit_init_breadcrumb(rq);
		if (err) {
			i915_request_add(rq);
			goto out;
		}
	}

	err = write_timestamp(rq, 0x100);
	if (err) {
		i915_request_add(rq);
		goto out;
	}

	err = rq->engine->emit_bb_start(rq,
					i915_ggtt_offset(stream->noa_wait), 0,
					I915_DISPATCH_SECURE);
	if (err) {
		i915_request_add(rq);
		goto out;
	}

	err = write_timestamp(rq, 0x102);
	if (err) {
		i915_request_add(rq);
		goto out;
	}

	i915_request_get(rq);
	i915_request_add(rq);

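	/*
	 * Keep preemption off so the two CPU samples are taken back to
	 * back while we poll for the GPU timestamp writes.
	 */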
	preempt_disable();
	t0 = poll_status(rq, 0x100);
	t1 = poll_status(rq, 0x102);
	preempt_enable();

	pr_info("CPU delay: %lluns, expected %lluns\n",
		ktime_sub(t1, t0), expected);

	delay = intel_read_status_page(stream->engine, 0x102);
	delay -= intel_read_status_page(stream->engine, 0x100);
	delay = i915_cs_timestamp_ticks_to_ns(i915, delay);
	pr_info("GPU delay: %uns, expected %lluns\n",
		delay, expected);

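	/* Allow the measured delay to land within [75%, 150%] of expected */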
	if (4 * delay < 3 * expected || 2 * delay > 3 * expected) {
		pr_err("GPU delay [%uus] outside of expected threshold! [%lluus, %lluus]\n",
		       delay / 1000,
		       div_u64(3 * expected, 4000),
		       div_u64(3 * expected, 2000));
		err = -EINVAL;
	}

	i915_request_put(rq);
out:
	stream_destroy(stream);
	return err;
}

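/*
 * Fill the CS_GPR registers with a known value, run the noa_wait batch
 * (which uses the GPRs for its delay loop) and then read the registers
 * back to check that the user's context state was preserved.
 */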
static int live_noa_gpr(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_perf_stream *stream;
	struct intel_context *ce;
	struct i915_request *rq;
	u32 *cs, *store;
	void *scratch;
	u32 gpr0;
	int err;
	int i;

	/* Check that the delay does not clobber user context state (GPR) */

	stream = test_stream(&i915->perf);
	if (!stream)
		return -ENOMEM;

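	/* mmio offset of the first CS general purpose register (GPR0) */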
	gpr0 = i915_mmio_reg_offset(GEN8_RING_CS_GPR(stream->engine->mmio_base, 0));

	ce = intel_context_create(stream->engine);
	if (IS_ERR(ce)) {
		err = PTR_ERR(ce);
		goto out;
	}

	/* Poison the ce->vm so we detect writes not to the GGTT gt->scratch */
	scratch = kmap(__px_page(ce->vm->scratch[0]));
	memset(scratch, POISON_FREE, PAGE_SIZE);

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_ce;
	}
	i915_request_get(rq);

	if (rq->engine->emit_init_breadcrumb) {
		err = rq->engine->emit_init_breadcrumb(rq);
		if (err) {
			i915_request_add(rq);
			goto out_rq;
		}
	}

	/* Fill the 16 qword [32 dword] GPR with a known unlikely value */
	cs = intel_ring_begin(rq, 2 * 32 + 2);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		i915_request_add(rq);
		goto out_rq;
	}

	*cs++ = MI_LOAD_REGISTER_IMM(32);
	for (i = 0; i < 32; i++) {
		*cs++ = gpr0 + i * sizeof(u32);
		*cs++ = STACK_MAGIC;
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	/* Execute the GPU delay */
	err = rq->engine->emit_bb_start(rq,
					i915_ggtt_offset(stream->noa_wait), 0,
					I915_DISPATCH_SECURE);
	if (err) {
		i915_request_add(rq);
		goto out_rq;
	}

	/* Read the GPRs back, using the pinned global HWSP for convenience */
	store = memset32(rq->engine->status_page.addr + 512, 0, 32);
	for (i = 0; i < 32; i++) {
		u32 cmd;

		cs = intel_ring_begin(rq, 4);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			i915_request_add(rq);
			goto out_rq;
		}

		cmd = MI_STORE_REGISTER_MEM;
		if (INTEL_GEN(i915) >= 8)
			cmd++;
		cmd |= MI_USE_GGTT;

		*cs++ = cmd;
		*cs++ = gpr0 + i * sizeof(u32);
		*cs++ = i915_ggtt_offset(rq->engine->status_page.vma) +
			offset_in_page(store) +
			i * sizeof(u32);
		*cs++ = 0;
		intel_ring_advance(rq, cs);
	}

	i915_request_add(rq);

	if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, HZ / 2) < 0) {
		pr_err("noa_wait timed out\n");
		intel_gt_set_wedged(stream->engine->gt);
		err = -EIO;
		goto out_rq;
	}

	/* Verify that the GPRs contain our expected values */
	for (i = 0; i < 32; i++) {
		if (store[i] == STACK_MAGIC)
			continue;

		pr_err("GPR[%d] lost, found:%08x, expected:%08x!\n",
		       i, store[i], STACK_MAGIC);
		err = -EINVAL;
	}

	/* Verify that the user's scratch page was not used for GPR storage */
	if (memchr_inv(scratch, POISON_FREE, PAGE_SIZE)) {
		pr_err("Scratch page overwritten!\n");
		igt_hexdump(scratch, PAGE_SIZE);
		err = -EINVAL;
	}

out_rq:
	i915_request_put(rq);
out_ce:
	kunmap(__px_page(ce->vm->scratch[0]));
	intel_context_put(ce);
out:
	stream_destroy(stream);
	return err;
}

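/*
 * Live selftest entry point; the tests are skipped unless the OA unit
 * is available (metrics_kobj and enable_metric_set) and the GT healthy.
 */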
int i915_perf_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_sanitycheck),
		SUBTEST(live_noa_delay),
		SUBTEST(live_noa_gpr),
	};
	struct i915_perf *perf = &i915->perf;
	int err;

	if (!perf->metrics_kobj || !perf->ops.enable_metric_set)
		return 0;

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	err = alloc_empty_config(&i915->perf);
	if (err)
		return err;

	err = i915_subtests(tests, i915);

	destroy_empty_config(&i915->perf);

	return err;
}