/* SPDX-License-Identifier: MIT */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <drm/drm_util.h>

#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/random.h>
#include <linux/seqlock.h>

#include "i915_pmu.h"
#include "i915_reg.h"
#include "i915_request.h"
#include "i915_selftest.h"
#include "gt/intel_timeline.h"
#include "intel_engine_types.h"
#include "intel_gpu_commands.h"
#include "intel_workarounds.h"

struct drm_printer;
struct intel_gt;

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))

#define ENGINE_TRACE(e, fmt, ...) do {                                  \
        const struct intel_engine_cs *e__ __maybe_unused = (e);        \
        GEM_TRACE("%s %s: " fmt,                                        \
                  dev_name(e__->i915->drm.dev), e__->name,              \
                  ##__VA_ARGS__);                                       \
} while (0)
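
/*
 * Illustrative use (a sketch only; the format string and arguments are
 * hypothetical, not taken from any particular call site):
 *
 *      ENGINE_TRACE(engine, "csb[%d]: status=0x%08x\n", head, status);
 *
 * This expands to a GEM_TRACE() prefixed with the device and engine name.
 */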

/*
 * The register defines to be used with the following macros need to accept a
 * base param, e.g.:
 *
 * REG_FOO(base) _MMIO((base) + <relative offset>)
 * ENGINE_READ(engine, REG_FOO);
 *
 * Register arrays are to be defined and accessed as follows:
 *
 * REG_BAR(base, i) _MMIO((base) + <relative offset> + (i) * <shift>)
 * ENGINE_READ_IDX(engine, REG_BAR, i)
 */

#define __ENGINE_REG_OP(op__, engine__, ...) \
        intel_uncore_##op__((engine__)->uncore, __VA_ARGS__)

#define __ENGINE_READ_OP(op__, engine__, reg__) \
        __ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base))

#define ENGINE_READ16(...) __ENGINE_READ_OP(read16, __VA_ARGS__)
#define ENGINE_READ(...) __ENGINE_READ_OP(read, __VA_ARGS__)
#define ENGINE_READ_FW(...) __ENGINE_READ_OP(read_fw, __VA_ARGS__)
#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read_fw, __VA_ARGS__)
#define ENGINE_POSTING_READ16(...) __ENGINE_READ_OP(posting_read16, __VA_ARGS__)

#define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \
        __ENGINE_REG_OP(read64_2x32, (engine__), \
                        lower_reg__((engine__)->mmio_base), \
                        upper_reg__((engine__)->mmio_base))

#define ENGINE_READ_IDX(engine__, reg__, idx__) \
        __ENGINE_REG_OP(read, (engine__), reg__((engine__)->mmio_base, (idx__)))

#define __ENGINE_WRITE_OP(op__, engine__, reg__, val__) \
        __ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base), (val__))

#define ENGINE_WRITE16(...) __ENGINE_WRITE_OP(write16, __VA_ARGS__)
#define ENGINE_WRITE(...) __ENGINE_WRITE_OP(write, __VA_ARGS__)
#define ENGINE_WRITE_FW(...) __ENGINE_WRITE_OP(write_fw, __VA_ARGS__)
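
/*
 * Example (a sketch only; REG_FOO is the hypothetical define from the
 * comment above, not a real register):
 *
 *      u32 val = ENGINE_READ(engine, REG_FOO);
 *      ENGINE_WRITE(engine, REG_FOO, val | BIT(0));
 *
 * The _FW variants skip the forcewake/runtime-pm bookkeeping and are meant
 * for callers that already hold the required forcewake.
 */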

#define GEN6_RING_FAULT_REG_READ(engine__) \
        intel_uncore_read((engine__)->uncore, RING_FAULT_REG(engine__))

#define GEN6_RING_FAULT_REG_POSTING_READ(engine__) \
        intel_uncore_posting_read((engine__)->uncore, RING_FAULT_REG(engine__))

#define GEN6_RING_FAULT_REG_RMW(engine__, clear__, set__) \
({ \
        u32 __val; \
\
        __val = intel_uncore_read((engine__)->uncore, \
                                  RING_FAULT_REG(engine__)); \
        __val &= ~(clear__); \
        __val |= (set__); \
        intel_uncore_write((engine__)->uncore, RING_FAULT_REG(engine__), \
                           __val); \
})
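
/*
 * For example, clearing the fault-valid bit after handling a ring fault
 * might look like this (a sketch; RING_FAULT_VALID is assumed to be the
 * relevant bit define):
 *
 *      GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
 */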

/* The seqno is only a u32, but since we plan to use MI_FLUSH_DW to do the
 * writes, and that requires qword-aligned offsets, simply pretend it's 8b.
 */

static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
        return execlists->port_mask + 1;
}

static inline struct i915_request *
execlists_active(const struct intel_engine_execlists *execlists)
{
        struct i915_request * const *cur, * const *old, *active;

        cur = READ_ONCE(execlists->active);
        smp_rmb(); /* pairs with overwrite protection in process_csb() */
        do {
                old = cur;

                active = READ_ONCE(*cur);
                cur = READ_ONCE(execlists->active);

                smp_rmb(); /* and complete the seqlock retry */
        } while (unlikely(cur != old));

        return active;
}

static inline void
execlists_active_lock_bh(struct intel_engine_execlists *execlists)
{
        local_bh_disable(); /* prevent local softirq and lock recursion */
        tasklet_lock(&execlists->tasklet);
}

static inline void
execlists_active_unlock_bh(struct intel_engine_execlists *execlists)
{
        tasklet_unlock(&execlists->tasklet);
        local_bh_enable(); /* restore softirq, and kick ksoftirqd! */
}
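
/*
 * A typical inspection pattern (a sketch; error handling elided) pairs the
 * bh lock with execlists_active() to sample the request currently on the
 * hardware without racing the submission tasklet:
 *
 *      execlists_active_lock_bh(&engine->execlists);
 *      rq = execlists_active(&engine->execlists);
 *      if (rq)
 *              ... inspect rq ...
 *      execlists_active_unlock_bh(&engine->execlists);
 */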

struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);

static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
        /* Ensure that the compiler doesn't optimize away the load. */
        return READ_ONCE(engine->status_page.addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
        /* Writing into the status page should be done sparingly. Since
         * we do so when we are uncertain of the device state, we take a
         * bit of extra paranoia to try and ensure that the HWS takes the
         * value we give and that it doesn't end up trapped inside the CPU!
         */
        if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
                mb();
                clflush(&engine->status_page.addr[reg]);
                engine->status_page.addr[reg] = value;
                clflush(&engine->status_page.addr[reg]);
                mb();
        } else {
                WRITE_ONCE(engine->status_page.addr[reg], value);
        }
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_PREEMPT            0x32
#define I915_GEM_HWS_PREEMPT_ADDR       (I915_GEM_HWS_PREEMPT * sizeof(u32))
#define I915_GEM_HWS_SEQNO              0x40
#define I915_GEM_HWS_SEQNO_ADDR         (I915_GEM_HWS_SEQNO * sizeof(u32))
#define I915_GEM_HWS_SCRATCH            0x80

#define I915_HWS_CSB_BUF0_INDEX         0x10
#define I915_HWS_CSB_WRITE_INDEX        0x1f
#define CNL_HWS_CSB_WRITE_INDEX         0x2f
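
/*
 * For instance, sampling the last seqno reported into the status page (a
 * sketch; whether the seqno actually lives at this fixed offset depends on
 * how the engine's timeline was set up):
 *
 *      u32 seqno = intel_read_status_page(engine, I915_GEM_HWS_SEQNO);
 */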

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

int intel_engines_init_mmio(struct intel_gt *gt);
int intel_engines_init(struct intel_gt *gt);

void intel_engine_free_request_pool(struct intel_engine_cs *engine);

void intel_engines_release(struct intel_gt *gt);
void intel_engines_free(struct intel_gt *gt);

int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_engine_resume(struct intel_engine_cs *engine);

int intel_ring_submission_setup(struct intel_engine_cs *engine);

int intel_engine_stop_cs(struct intel_engine_cs *engine);
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);

void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);

void intel_engine_get_instdone(const struct intel_engine_cs *engine,
                               struct intel_instdone *instdone);

void intel_engine_init_execlists(struct intel_engine_cs *engine);

static inline u32 *__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
{
        memset(batch, 0, 6 * sizeof(u32));

        batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0;
        batch[1] = flags1;
        batch[2] = offset;

        return batch + 6;
}

static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
        return __gen8_emit_pipe_control(batch, 0, flags, offset);
}

static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
{
        return __gen8_emit_pipe_control(batch, flags0, flags1, offset);
}
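
/*
 * Callers hand these a pointer into ring/batch space and continue emitting
 * from the returned pointer, e.g. emitting a CS stall (a sketch only; the
 * flag choice is illustrative):
 *
 *      batch = gen8_emit_pipe_control(batch, PIPE_CONTROL_CS_STALL, 0);
 */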

static inline u32 *
__gen8_emit_write_rcs(u32 *cs, u32 value, u32 offset, u32 flags0, u32 flags1)
{
        *cs++ = GFX_OP_PIPE_CONTROL(6) | flags0;
        *cs++ = flags1 | PIPE_CONTROL_QW_WRITE;
        *cs++ = offset;
        *cs++ = 0;
        *cs++ = value;
        *cs++ = 0; /* We're thrashing one extra dword. */

        return cs;
}

static inline u32 *
gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
        /* We're using qword write, offset should be aligned to 8 bytes. */
        GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

        return __gen8_emit_write_rcs(cs,
                                     value,
                                     gtt_offset,
                                     0,
                                     flags | PIPE_CONTROL_GLOBAL_GTT_IVB);
}

static inline u32 *
gen12_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
{
        /* We're using qword write, offset should be aligned to 8 bytes. */
        GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

        return __gen8_emit_write_rcs(cs,
                                     value,
                                     gtt_offset,
                                     flags0,
                                     flags1 | PIPE_CONTROL_GLOBAL_GTT_IVB);
}

static inline u32 *
__gen8_emit_flush_dw(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
        *cs++ = (MI_FLUSH_DW + 1) | flags;
        *cs++ = gtt_offset;
        *cs++ = 0;
        *cs++ = value;

        return cs;
}

static inline u32 *
gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
        /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
        GEM_BUG_ON(gtt_offset & (1 << 5));
        /* Offset should be aligned to 8 bytes for both (QW/DW) write types */
        GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

        return __gen8_emit_flush_dw(cs,
                                    value,
                                    gtt_offset | MI_FLUSH_DW_USE_GTT,
                                    flags | MI_FLUSH_DW_OP_STOREDW);
}
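
/*
 * These helpers are typically used when emitting breadcrumbs: a request's
 * completion seqno might be written to its timeline's slot in the HWSP with
 * something like (a sketch; the exact flags and offset depend on the
 * platform's fini_breadcrumb implementation):
 *
 *      cs = gen8_emit_ggtt_write(cs, rq->fence.seqno,
 *                                i915_request_active_timeline(rq)->hwsp_offset,
 *                                0);
 */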

static inline void __intel_engine_reset(struct intel_engine_cs *engine,
                                        bool stalled)
{
        if (engine->reset.rewind)
                engine->reset.rewind(engine, stalled);
        engine->serial++; /* contexts lost */
}

bool intel_engines_are_idle(struct intel_gt *gt);
bool intel_engine_is_idle(struct intel_engine_cs *engine);
void intel_engine_flush_submission(struct intel_engine_cs *engine);

void intel_engines_reset_default_submission(struct intel_gt *gt);

bool intel_engine_can_store_dword(struct intel_engine_cs *engine);

__printf(3, 4)
void intel_engine_dump(struct intel_engine_cs *engine,
                       struct drm_printer *m,
                       const char *header, ...);

ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine,
                                   ktime_t *now);

struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine);

u32 intel_engine_context_size(struct intel_gt *gt, u8 class);

void intel_engine_init_active(struct intel_engine_cs *engine,
                              unsigned int subclass);
#define ENGINE_PHYSICAL 0
#define ENGINE_MOCK     1
#define ENGINE_VIRTUAL  2

static inline bool
intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
{
        if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
                return false;

        return intel_engine_has_preemption(engine);
}

static inline bool
intel_engine_has_heartbeat(const struct intel_engine_cs *engine)
{
        if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
                return false;

        return READ_ONCE(engine->props.heartbeat_interval_ms);
}

#endif /* _INTEL_RINGBUFFER_H_ */