// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "debugfs_gt.h"
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gt.h"
#include "intel_gt_buffer_pool.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_mocs.h"
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
#include "intel_uncore.h"
#include "intel_pm.h"
#include "shmem_utils.h"

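/*
 * Early, software-only initialisation of the GT: record the i915/uncore
 * backpointers and set up locks, lists and the early state of the GT
 * sub-components. No MMIO access happens here, so this is safe to call
 * before the hardware has been brought up.
 */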
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
	gt->i915 = i915;
	gt->uncore = &i915->uncore;

	spin_lock_init(&gt->irq_lock);

	mutex_init(&gt->tlb_invalidate_lock);

	INIT_LIST_HEAD(&gt->closed_vma);
	spin_lock_init(&gt->closed_lock);

	intel_gt_init_buffer_pool(gt);
	intel_gt_init_reset(gt);
	intel_gt_init_requests(gt);
	intel_gt_init_timelines(gt);
	intel_gt_pm_init_early(gt);

	intel_rps_init_early(&gt->rps);
	intel_uc_init_early(&gt->uc);
}

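/* Record the GGTT for the GT once the address space has been probed. */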
void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
{
	gt->ggtt = ggtt;
}

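/*
 * First pass over the hardware that only needs MMIO access: probe the uC,
 * the slice/subslice layout and the set of available engines.
 */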
int intel_gt_init_mmio(struct intel_gt *gt)
{
	intel_uc_init_mmio(&gt->uc);
	intel_sseu_info_init(gt);

	return intel_engines_init_mmio(gt);
}

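/* Reset the control registers of a ring the driver never uses. */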
static void init_unused_ring(struct intel_gt *gt, u32 base)
{
	struct intel_uncore *uncore = gt->uncore;

	intel_uncore_write(uncore, RING_CTL(base), 0);
	intel_uncore_write(uncore, RING_HEAD(base), 0);
	intel_uncore_write(uncore, RING_TAIL(base), 0);
	intel_uncore_write(uncore, RING_START(base), 0);
}

static void init_unused_rings(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	if (IS_I830(i915)) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
		init_unused_ring(gt, SRB2_BASE);
		init_unused_ring(gt, SRB3_BASE);
	} else if (IS_GEN(i915, 2)) {
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
	} else if (IS_GEN(i915, 3)) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, PRB2_BASE);
	}
}

int intel_gt_init_hw(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	gt->last_init_time = ktime_get();

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
		intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));

	if (IS_HASWELL(i915))
		intel_uncore_write(uncore,
				   MI_PREDICATE_RESULT_2,
				   IS_HSW_GT3(i915) ?
				   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	/* Apply the GT workarounds... */
	intel_gt_apply_workarounds(gt);
	/* ...and determine whether they are sticking. */
	intel_gt_verify_workarounds(gt, "init");

	intel_gt_init_swizzling(gt);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(gt);

	ret = i915_ppgtt_init_hw(gt);
	if (ret) {
		DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
		goto out;
	}

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_uc_init_hw(&gt->uc);
	if (ret) {
		i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
		goto out;
	}

	intel_mocs_init(gt);

out:
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
	return ret;
}

static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw(uncore, reg, 0, set);
}

static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw(uncore, reg, clr, 0);
}

static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
{
	intel_uncore_rmw(uncore, reg, 0, 0);
}

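/* Clear a stale per-engine fault report and post the write so it lands. */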
static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
{
	GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
	GEN6_RING_FAULT_REG_POSTING_READ(engine);
}

void
intel_gt_clear_error_registers(struct intel_gt *gt,
			       intel_engine_mask_t engine_mask)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 eir;

	if (!IS_GEN(i915, 2))
		clear_register(uncore, PGTBL_ER);

	if (INTEL_GEN(i915) < 4)
		clear_register(uncore, IPEIR(RENDER_RING_BASE));
	else
		clear_register(uncore, IPEIR_I965);

	clear_register(uncore, EIR);
	eir = intel_uncore_read(uncore, EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
		rmw_set(uncore, EMR, eir);
		intel_uncore_write(uncore, GEN2_IIR,
				   I915_MASTER_ERROR_INTERRUPT);
	}

	if (INTEL_GEN(i915) >= 12) {
		rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
		intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
	} else if (INTEL_GEN(i915) >= 8) {
		rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
		intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
	} else if (INTEL_GEN(i915) >= 6) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine_masked(engine, gt, engine_mask, id)
			gen8_clear_engine_error_register(engine);
	}
}

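/*
 * gen6/gen7 report faults through a per-engine fault register; dump any
 * fault still latched there as valid.
 */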
static void gen6_check_faults(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 fault;

	for_each_engine(engine, gt, id) {
		fault = GEN6_RING_FAULT_REG_READ(engine);
		if (fault & RING_FAULT_VALID) {
			drm_dbg(&engine->i915->drm, "Unexpected fault\n"
				"\tAddr: 0x%08lx\n"
				"\tAddress space: %s\n"
				"\tSource ID: %d\n"
				"\tType: %d\n",
				fault & PAGE_MASK,
				fault & RING_FAULT_GTTSEL_MASK ?
				"GGTT" : "PPGTT",
				RING_FAULT_SRCID(fault),
				RING_FAULT_FAULT_TYPE(fault));
		}
	}
}

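/*
 * gen8 onwards has a single fault register shared by all engines (gen12
 * relocates it and its TLB data registers); decode and dump any latched
 * fault.
 */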
static void gen8_check_faults(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
	u32 fault;

	if (INTEL_GEN(gt->i915) >= 12) {
		fault_reg = GEN12_RING_FAULT_REG;
		fault_data0_reg = GEN12_FAULT_TLB_DATA0;
		fault_data1_reg = GEN12_FAULT_TLB_DATA1;
	} else {
		fault_reg = GEN8_RING_FAULT_REG;
		fault_data0_reg = GEN8_FAULT_TLB_DATA0;
		fault_data1_reg = GEN8_FAULT_TLB_DATA1;
	}

	fault = intel_uncore_read(uncore, fault_reg);
	if (fault & RING_FAULT_VALID) {
		u32 fault_data0, fault_data1;
		u64 fault_addr;

		fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
		fault_data1 = intel_uncore_read(uncore, fault_data1_reg);

		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
			     ((u64)fault_data0 << 12);

		drm_dbg(&uncore->i915->drm, "Unexpected fault\n"
			"\tAddr: 0x%08x_%08x\n"
			"\tAddress space: %s\n"
			"\tEngine ID: %d\n"
			"\tSource ID: %d\n"
			"\tType: %d\n",
			upper_32_bits(fault_addr), lower_32_bits(fault_addr),
			fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
			GEN8_RING_FAULT_ENGINE_ID(fault),
			RING_FAULT_SRCID(fault),
			RING_FAULT_FAULT_TYPE(fault));
	}
}

void intel_gt_check_and_clear_faults(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	/* From GEN8 onwards we only have one 'All Engine Fault Register' */
	if (INTEL_GEN(i915) >= 8)
		gen8_check_faults(gt);
	else if (INTEL_GEN(i915) >= 6)
		gen6_check_faults(gt);
	else
		return;

	intel_gt_clear_error_registers(gt, ALL_ENGINES);
}

void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	intel_wakeref_t wakeref;

	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * that was!).
	 */

	wmb();

	if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
		return;

	intel_gt_chipset_flush(gt);

	with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) {
		unsigned long flags;

		spin_lock_irqsave(&uncore->lock, flags);
		intel_uncore_posting_read_fw(uncore,
					     RING_HEAD(RENDER_RING_BASE));
		spin_unlock_irqrestore(&uncore->lock, flags);
	}
}

void intel_gt_chipset_flush(struct intel_gt *gt)
{
	wmb();
	if (INTEL_GEN(gt->i915) < 6)
		intel_gtt_chipset_flush();
}

void intel_gt_driver_register(struct intel_gt *gt)
{
	intel_rps_driver_register(&gt->rps);

	debugfs_gt_register(gt);
}

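/*
 * Allocate the GT-wide scratch page, preferring stolen memory and falling
 * back to an internal object, and pin it high in the GGTT for the lifetime
 * of the GT.
 */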
static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
{
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
	if (ret)
		goto err_unref;

	gt->scratch = i915_vma_make_unshrinkable(vma);

	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void intel_gt_fini_scratch(struct intel_gt *gt)
{
	i915_vma_unpin_and_release(&gt->scratch, 0);
}

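/*
 * Select the address space used for the GT's kernel contexts: a full ppGTT
 * where the platform supports one, otherwise an extra reference on the
 * global GTT.
 */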
static struct i915_address_space *kernel_vm(struct intel_gt *gt)
{
	if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
		return &i915_ppgtt_create(gt)->vm;
	else
		return i915_vm_get(&gt->ggtt->vm);
}

static int __engines_record_defaults(struct intel_gt *gt)
{
	struct i915_request *requests[I915_NUM_ENGINES] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * As we reset the gpu during very early sanitisation, the current
	 * register state on the GPU should reflect its default values.
	 * We load a context onto the hw (with restore-inhibit), then switch
	 * over to a second context to save that default register state. We
	 * can then prime every new context with that state so they all start
	 * from the same default HW values.
	 */

	for_each_engine(engine, gt, id) {
		struct intel_renderstate so;
		struct intel_context *ce;
		struct i915_request *rq;

		/* We must be able to switch to something! */
		GEM_BUG_ON(!engine->kernel_context);

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto out;
		}

		err = intel_renderstate_init(&so, ce);
		if (err)
			goto err;

		rq = i915_request_create(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_fini;
		}

		err = intel_engine_emit_ctx_wa(rq);
		if (err)
			goto err_rq;

		err = intel_renderstate_emit(&so, rq);
		if (err)
			goto err_rq;

err_rq:
		requests[id] = i915_request_get(rq);
		i915_request_add(rq);
err_fini:
		intel_renderstate_fini(&so, ce);
err:
		if (err) {
			intel_context_put(ce);
			goto out;
		}
	}

	/* Flush the default context image to memory, and enable powersaving. */
	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
		err = -EIO;
		goto out;
	}

	for (id = 0; id < ARRAY_SIZE(requests); id++) {
		struct i915_request *rq;
		struct file *state;

		rq = requests[id];
		if (!rq)
			continue;

		if (rq->fence.error) {
			err = -EIO;
			goto out;
		}

		GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
		if (!rq->context->state)
			continue;

		/* Keep a copy of the state's backing pages; free the obj */
		state = shmem_create_from_object(rq->context->state->obj);
		if (IS_ERR(state)) {
			err = PTR_ERR(state);
			goto out;
		}
		rq->engine->default_state = state;
	}

out:
	/*
	 * If we have to abandon now, we expect the engines to be idle
	 * and ready to be torn-down. The quickest way we can accomplish
	 * this is by declaring ourselves wedged.
	 */
	if (err)
		intel_gt_set_wedged(gt);

	for (id = 0; id < ARRAY_SIZE(requests); id++) {
		struct intel_context *ce;
		struct i915_request *rq;

		rq = requests[id];
		if (!rq)
			continue;

		ce = rq->context;
		i915_request_put(rq);
		intel_context_put(ce);
	}
	return err;
}

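/*
 * Debug-only sanity check: re-verify the engine workaround lists after the
 * first requests have run and flag any value that did not stick.
 */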
static int __engines_verify_workarounds(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return 0;

	for_each_engine(engine, gt, id) {
		if (intel_engine_verify_workarounds(engine, "load"))
			err = -EIO;
	}

	/* Flush and restore the kernel context for safety */
	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME)
		err = -EIO;

	return err;
}

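/*
 * Stop the GT ahead of removal or teardown: wedge it, run the suspend
 * sequence and check that no power references remain.
 */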
static void __intel_gt_disable(struct intel_gt *gt)
{
	intel_gt_set_wedged_on_fini(gt);

	intel_gt_suspend_prepare(gt);
	intel_gt_suspend_late(gt);

	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
}

int intel_gt_init(struct intel_gt *gt)
{
	int err;

	err = i915_inject_probe_error(gt->i915, -ENODEV);
	if (err)
		return err;

	/*
	 * This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

	intel_gt_init_clock_frequency(gt);

	err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
	if (err)
		goto out_fw;

	intel_gt_pm_init(gt);

	gt->vm = kernel_vm(gt);
	if (!gt->vm) {
		err = -ENOMEM;
		goto err_pm;
	}

	err = intel_engines_init(gt);
	if (err)
		goto err_engines;

	err = intel_uc_init(&gt->uc);
	if (err)
		goto err_engines;

	err = intel_gt_resume(gt);
	if (err)
		goto err_uc_init;

	err = __engines_record_defaults(gt);
	if (err)
		goto err_gt;

	err = __engines_verify_workarounds(gt);
	if (err)
		goto err_gt;

	err = i915_inject_probe_error(gt->i915, -EIO);
	if (err)
		goto err_gt;

	goto out_fw;
err_gt:
	__intel_gt_disable(gt);
	intel_uc_fini_hw(&gt->uc);
err_uc_init:
	intel_uc_fini(&gt->uc);
err_engines:
	intel_engines_release(gt);
	i915_vm_put(fetch_and_zero(&gt->vm));
err_pm:
	intel_gt_pm_fini(gt);
	intel_gt_fini_scratch(gt);
out_fw:
	if (err)
		intel_gt_set_wedged_on_init(gt);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	return err;
}

void intel_gt_driver_remove(struct intel_gt *gt)
{
	__intel_gt_disable(gt);

	intel_uc_driver_remove(&gt->uc);

	intel_engines_release(gt);
}

void intel_gt_driver_unregister(struct intel_gt *gt)
{
	intel_rps_driver_unregister(&gt->rps);

	/*
	 * Upon unregistering the device to prevent any new users, cancel
	 * all in-flight requests so that we can quickly unbind the active
	 * resources.
	 */
	intel_gt_set_wedged(gt);
}

void intel_gt_driver_release(struct intel_gt *gt)
{
	struct i915_address_space *vm;
	intel_wakeref_t wakeref;

	/* Scrub all HW state upon release */
	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		__intel_gt_reset(gt, ALL_ENGINES);

	vm = fetch_and_zero(&gt->vm);
	if (vm) /* FIXME being called twice on error paths :( */
		i915_vm_put(vm);

	intel_gt_pm_fini(gt);
	intel_gt_fini_scratch(gt);
	intel_gt_fini_buffer_pool(gt);
}

void intel_gt_driver_late_release(struct intel_gt *gt)
{
	/* We need to wait for inflight RCU frees to release their grip */
	rcu_barrier();

	intel_uc_driver_late_release(&gt->uc);
	intel_gt_fini_requests(gt);
	intel_gt_fini_reset(gt);
	intel_gt_fini_timelines(gt);
	intel_engines_free(gt);
}

void intel_gt_info_print(const struct intel_gt_info *info,
			 struct drm_printer *p)
{
	drm_printf(p, "available engines: %x\n", info->engine_mask);

	intel_sseu_dump(&info->sseu, p);
}

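/* Pairs a TLB invalidation register with the bit for one engine instance. */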
struct reg_and_bit {
	i915_reg_t reg;
	u32 bit;
};

static struct reg_and_bit
get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
		const i915_reg_t *regs, const unsigned int num)
{
	const unsigned int class = engine->class;
	struct reg_and_bit rb = { };

	if (drm_WARN_ON_ONCE(&engine->i915->drm,
			     class >= num || !regs[class].reg))
		return rb;

	rb.reg = regs[class];
	if (gen8 && class == VIDEO_DECODE_CLASS)
		rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */
	else
		rb.bit = engine->instance;

	rb.bit = BIT(rb.bit);

	return rb;
}

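/*
 * Invalidate the TLBs of every engine: write each engine's invalidation
 * register under the uncore lock (serialised against GT reset), then poll
 * each register until the hardware clears the request bit.
 */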
void intel_gt_invalidate_tlbs(struct intel_gt *gt)
{
	static const i915_reg_t gen8_regs[] = {
		[RENDER_CLASS]			= GEN8_RTCR,
		[VIDEO_DECODE_CLASS]		= GEN8_M1TCR, /* , GEN8_M2TCR */
		[VIDEO_ENHANCEMENT_CLASS]	= GEN8_VTCR,
		[COPY_ENGINE_CLASS]		= GEN8_BTCR,
	};
	static const i915_reg_t gen12_regs[] = {
		[RENDER_CLASS]			= GEN12_GFX_TLB_INV_CR,
		[VIDEO_DECODE_CLASS]		= GEN12_VD_TLB_INV_CR,
		[VIDEO_ENHANCEMENT_CLASS]	= GEN12_VE_TLB_INV_CR,
		[COPY_ENGINE_CLASS]		= GEN12_BLT_TLB_INV_CR,
	};
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	const i915_reg_t *regs;
	unsigned int num = 0;

	if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
		return;

	if (INTEL_GEN(i915) == 12) {
		regs = gen12_regs;
		num = ARRAY_SIZE(gen12_regs);
	} else if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) <= 11) {
		regs = gen8_regs;
		num = ARRAY_SIZE(gen8_regs);
	} else if (INTEL_GEN(i915) < 8) {
		return;
	}

	if (drm_WARN_ONCE(&i915->drm, !num,
			  "Platform does not implement TLB invalidation!"))
		return;

	GEM_TRACE("\n");

	assert_rpm_wakelock_held(&i915->runtime_pm);

	mutex_lock(&gt->tlb_invalidate_lock);
	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */

	for_each_engine(engine, gt, id) {
		struct reg_and_bit rb;

		rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
		if (!i915_mmio_reg_offset(rb.reg))
			continue;

		if (INTEL_GEN(i915) == 12 && (engine->class == VIDEO_DECODE_CLASS ||
		    engine->class == VIDEO_ENHANCEMENT_CLASS))
			rb.bit = _MASKED_BIT_ENABLE(rb.bit);

		intel_uncore_write_fw(uncore, rb.reg, rb.bit);
	}

	spin_unlock_irq(&uncore->lock);

	for_each_engine(engine, gt, id) {
		/*
		 * HW architecture suggests typical invalidation time at 40us,
		 * with pessimistic cases up to 100us and a recommendation to
		 * cap at 1ms. We go a bit higher just in case.
		 */
		const unsigned int timeout_us = 100;
		const unsigned int timeout_ms = 4;
		struct reg_and_bit rb;

		rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
		if (!i915_mmio_reg_offset(rb.reg))
			continue;

		if (__intel_wait_for_register_fw(uncore,
						 rb.reg, rb.bit, 0,
						 timeout_us, timeout_ms,
						 NULL))
			drm_err_ratelimited(&gt->i915->drm,
					    "%s TLB invalidation did not complete in %ums!\n",
					    engine->name, timeout_ms);
	}

	intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
	mutex_unlock(&gt->tlb_invalidate_lock);
}