/*
 * SPDX-License-Identifier: GPL-2.0
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_selftest.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"

#include "gem/selftests/mock_context.h"
#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"

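/*
 * Submit @rq at barrier priority, wait briefly for it to complete and
 * retire it on success. The request's timeline mutex is held on entry
 * (taken when the request was created) and is released here, so this
 * stands in for i915_request_add() plus a synchronous wait.
 */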
static int request_sync(struct i915_request *rq)
{
	struct intel_timeline *tl = i915_request_timeline(rq);
	long timeout;
	int err = 0;

	intel_timeline_get(tl);
	i915_request_get(rq);

	/* Opencode i915_request_add() so we can keep the timeline locked. */
	__i915_request_commit(rq);
	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	__i915_request_queue(rq, NULL);

	timeout = i915_request_wait(rq, 0, HZ / 10);
	if (timeout < 0)
		err = timeout;
	else
		i915_request_retire_upto(rq);

	lockdep_unpin_lock(&tl->mutex, rq->cookie);
	mutex_unlock(&tl->mutex);

	i915_request_put(rq);
	intel_timeline_put(tl);

	return err;
}

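/*
 * Drain the context's timeline: wait for and retire each outstanding
 * request in turn, then wait for any idle-barrier callbacks running on
 * a remote CPU so that ce->active can be inspected safely afterwards.
 */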
static int context_sync(struct intel_context *ce)
{
	struct intel_timeline *tl = ce->timeline;
	int err = 0;

	mutex_lock(&tl->mutex);
	do {
		struct i915_request *rq;
		long timeout;

		if (list_empty(&tl->requests))
			break;

		rq = list_last_entry(&tl->requests, typeof(*rq), link);
		i915_request_get(rq);

		timeout = i915_request_wait(rq, 0, HZ / 10);
		if (timeout < 0)
			err = timeout;
		else
			i915_request_retire_upto(rq);

		i915_request_put(rq);
	} while (!err);
	mutex_unlock(&tl->mutex);

	/* Wait for all barriers to complete (remote CPU) before we check */
	i915_active_unlock_wait(&ce->active);
	return err;
}

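/*
 * Poison the page just beyond the (inflated) context size reported by the
 * engine, run a request through the context and then check that the
 * context save did not write into the poisoned area.
 */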
static int __live_context_size(struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	struct i915_request *rq;
	void *vaddr;
	int err;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_pin(ce);
	if (err)
		goto err;

	vaddr = i915_gem_object_pin_map(ce->state->obj,
					i915_coherent_map_type(engine->i915));
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		intel_context_unpin(ce);
		goto err;
	}

	/*
	 * Note that execlists also applies a redzone which it checks on
	 * context unpin when debugging. We are using the same location
	 * and same poison value so that our checks overlap. Despite the
	 * redundancy, we want to keep this little selftest so that we
	 * get coverage of any and all submission backends, and we can
	 * always extend this test to ensure we trick the HW into a
	 * compromising position wrt the various sections that need
	 * to be written into the context state.
	 *
	 * TLDR; this overlaps with the execlists redzone.
	 */
	vaddr += engine->context_size - I915_GTT_PAGE_SIZE;
	memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);

	rq = intel_context_create_request(ce);
	intel_context_unpin(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = request_sync(rq);
	if (err)
		goto err_unpin;

	/* Force the context switch */
	rq = intel_engine_create_kernel_request(engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}
	err = request_sync(rq);
	if (err)
		goto err_unpin;

	if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE)) {
		pr_err("%s context overwrote trailing red-zone!", engine->name);
		err = -EINVAL;
	}

err_unpin:
	i915_gem_object_unpin_map(ce->state->obj);
err:
	intel_context_put(ce);
	return err;
}

static int live_context_size(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * Check that our context sizes are correct by seeing if the
	 * HW tries to write past the end of one.
	 */

	for_each_engine(engine, gt, id) {
		struct file *saved;

		if (!engine->context_size)
			continue;

		intel_engine_pm_get(engine);

		/*
		 * Hide the old default state -- we lie about the context size
		 * and get confused when the default state is smaller than
		 * expected. For our do-nothing request, inheriting the
		 * active state is sufficient; we are only checking that we
		 * don't use more than we planned.
		 */
		saved = fetch_and_zero(&engine->default_state);

		/* Overlaps with the execlists redzone */
		engine->context_size += I915_GTT_PAGE_SIZE;

		err = __live_context_size(engine);

		engine->context_size -= I915_GTT_PAGE_SIZE;

		engine->default_state = saved;

		intel_engine_pm_put(engine);

		if (err)
			break;
	}

	return err;
}

static int __live_active_context(struct intel_engine_cs *engine)
{
	unsigned long saved_heartbeat;
	struct intel_context *ce;
	int pass;
	int err;

	/*
	 * We keep active contexts alive until after a subsequent context
	 * switch as the final write from the context-save will be after
	 * we retire the final request. We track when we unpin the context,
	 * under the presumption that the final pin is from the last request,
	 * and instead of immediately unpinning the context, we add a task
	 * to unpin the context from the next idle-barrier.
	 *
	 * This test makes sure that the context is kept alive until a
	 * subsequent idle-barrier (emitted when the engine wakeref hits 0
	 * with no more outstanding requests).
	 */

	if (intel_engine_pm_is_awake(engine)) {
		pr_err("%s is awake before starting %s!\n",
		       engine->name, __func__);
		return -EINVAL;
	}

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	saved_heartbeat = engine->props.heartbeat_interval_ms;
	engine->props.heartbeat_interval_ms = 0;

	for (pass = 0; pass <= 2; pass++) {
		struct i915_request *rq;

		intel_engine_pm_get(engine);

		rq = intel_context_create_request(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_engine;
		}

		err = request_sync(rq);
		if (err)
			goto out_engine;

		/* Context will be kept active until after an idle-barrier. */
		if (i915_active_is_idle(&ce->active)) {
			pr_err("context is not active; expected idle-barrier (%s pass %d)\n",
			       engine->name, pass);
			err = -EINVAL;
			goto out_engine;
		}

		if (!intel_engine_pm_is_awake(engine)) {
			pr_err("%s is asleep before idle-barrier\n",
			       engine->name);
			err = -EINVAL;
			goto out_engine;
		}

out_engine:
		intel_engine_pm_put(engine);
		if (err)
			goto err;
	}

	/* Now make sure our idle-barriers are flushed */
	err = intel_engine_flush_barriers(engine);
	if (err)
		goto err;

	/* Wait for the barrier and in the process wait for the engine to park */
	err = context_sync(engine->kernel_context);
	if (err)
		goto err;

	if (!i915_active_is_idle(&ce->active)) {
		pr_err("context is still active!");
		err = -EINVAL;
	}

	intel_engine_pm_flush(engine);

	if (intel_engine_pm_is_awake(engine)) {
		struct drm_printer p = drm_debug_printer(__func__);

		intel_engine_dump(engine, &p,
				  "%s is still awake:%d after idle-barriers\n",
				  engine->name,
				  atomic_read(&engine->wakeref.count));
		GEM_TRACE_DUMP();

		err = -EINVAL;
		goto err;
	}

err:
	engine->props.heartbeat_interval_ms = saved_heartbeat;
	intel_context_put(ce);
	return err;
}

static int live_active_context(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	for_each_engine(engine, gt, id) {
		err = __live_active_context(engine);
		if (err)
			break;

		err = igt_flush_test(gt->i915);
		if (err)
			break;
	}

	return err;
}

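/*
 * Submit a request on @ce that operates on @remote's context image via
 * intel_context_prepare_remote_request(), so a foreign fence is tracked
 * in remote->active, and wait for it to complete.
 */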
static int __remote_sync(struct intel_context *ce, struct intel_context *remote)
{
	struct i915_request *rq;
	int err;

	err = intel_context_pin(remote);
	if (err)
		return err;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin;
	}

	err = intel_context_prepare_remote_request(remote, rq);
	if (err) {
		i915_request_add(rq);
		goto unpin;
	}

	err = request_sync(rq);

unpin:
	intel_context_unpin(remote);
	return err;
}

static int __live_remote_context(struct intel_engine_cs *engine)
{
	struct intel_context *local, *remote;
	unsigned long saved_heartbeat;
	int pass;
	int err;

	/*
	 * Check that our idle barriers do not interfere with normal
	 * activity tracking. In particular, check that operating
	 * on the context image remotely (intel_context_prepare_remote_request),
	 * which inserts foreign fences into intel_context.active, does not
	 * clobber the idle-barrier.
	 */

	if (intel_engine_pm_is_awake(engine)) {
		pr_err("%s is awake before starting %s!\n",
		       engine->name, __func__);
		return -EINVAL;
	}

	remote = intel_context_create(engine);
	if (IS_ERR(remote))
		return PTR_ERR(remote);

	local = intel_context_create(engine);
	if (IS_ERR(local)) {
		err = PTR_ERR(local);
		goto err_remote;
	}

	saved_heartbeat = engine->props.heartbeat_interval_ms;
	engine->props.heartbeat_interval_ms = 0;
	intel_engine_pm_get(engine);

	for (pass = 0; pass <= 2; pass++) {
		err = __remote_sync(local, remote);
		if (err)
			break;

		err = __remote_sync(engine->kernel_context, remote);
		if (err)
			break;

		if (i915_active_is_idle(&remote->active)) {
			pr_err("remote context is not active; expected idle-barrier (%s pass %d)\n",
			       engine->name, pass);
			err = -EINVAL;
			break;
		}
	}

	intel_engine_pm_put(engine);
	engine->props.heartbeat_interval_ms = saved_heartbeat;

	intel_context_put(local);
err_remote:
	intel_context_put(remote);
	return err;
}

static int live_remote_context(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	for_each_engine(engine, gt, id) {
		err = __live_remote_context(engine);
		if (err)
			break;

		err = igt_flush_test(gt->i915);
		if (err)
			break;
	}

	return err;
}

int intel_context_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_context_size),
		SUBTEST(live_active_context),
		SUBTEST(live_remote_context),
	};
	struct intel_gt *gt = &i915->gt;

	if (intel_gt_is_wedged(gt))
		return 0;

	return intel_gt_live_subtests(tests, gt);
}