/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__

#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/types.h>

#include "i915_active.h"
#include "i915_drv.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
#include "intel_ring_types.h"
#include "intel_timeline_types.h"

#define CE_TRACE(ce, fmt, ...) do {					\
	const struct intel_context *ce__ = (ce);			\
	ENGINE_TRACE(ce__->engine, "context:%llx " fmt,			\
		     ce__->timeline->fence_context,			\
		     ##__VA_ARGS__);					\
} while (0)
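
/*
 * Illustrative use of CE_TRACE (a sketch, not taken from the original
 * source): the macro prepends the context's fence id and the engine's
 * trace prefix, and the trailing arguments feed the printf-style format.
 *
 *	CE_TRACE(ce, "pin_count:%d\n", atomic_read(&ce->pin_count));
 */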

struct i915_gem_ww_ctx;

void intel_context_init(struct intel_context *ce,
			struct intel_engine_cs *engine);
void intel_context_fini(struct intel_context *ce);

struct intel_context *
intel_context_create(struct intel_engine_cs *engine);

int intel_context_alloc_state(struct intel_context *ce);

void intel_context_free(struct intel_context *ce);

int intel_context_reconfigure_sseu(struct intel_context *ce,
				   const struct intel_sseu sseu);
/**
 * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
 * @ce: the context
 *
 * Acquire a lock on the pinned status of the HW context, such that the context
 * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
 * intel_context_is_pinned() remains stable.
 */
static inline int intel_context_lock_pinned(struct intel_context *ce)
	__acquires(ce->pin_mutex)
{
	return mutex_lock_interruptible(&ce->pin_mutex);
}

/**
 * intel_context_is_pinned - Reports the 'pinned' status
 * @ce: the context
 *
 * While in use by the GPU, the context, along with its ring and page
 * tables, is pinned into memory and the GTT.
 *
 * Returns: true if the context is currently pinned for use by the GPU.
 */
static inline bool
intel_context_is_pinned(struct intel_context *ce)
{
	return atomic_read(&ce->pin_count);
}

/**
 * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
 * @ce: the context
 *
 * Releases the lock earlier acquired by intel_context_lock_pinned().
 */
static inline void intel_context_unlock_pinned(struct intel_context *ce)
	__releases(ce->pin_mutex)
{
	mutex_unlock(&ce->pin_mutex);
}
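
/*
 * Example (a hedged sketch, not from the original source): stabilise and
 * then query the pinned status. The lock helper may fail with -EINTR as
 * it takes the mutex interruptibly, so check its return value.
 *
 *	err = intel_context_lock_pinned(ce);
 *	if (err)
 *		return err;
 *
 *	if (intel_context_is_pinned(ce))
 *		... inspect state that only exists while pinned ...
 *
 *	intel_context_unlock_pinned(ce);
 */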

int __intel_context_do_pin(struct intel_context *ce);
int __intel_context_do_pin_ww(struct intel_context *ce,
			      struct i915_gem_ww_ctx *ww);

static inline bool intel_context_pin_if_active(struct intel_context *ce)
{
	return atomic_inc_not_zero(&ce->pin_count);
}

static inline int intel_context_pin(struct intel_context *ce)
{
	if (likely(intel_context_pin_if_active(ce)))
		return 0;

	return __intel_context_do_pin(ce);
}

static inline int intel_context_pin_ww(struct intel_context *ce,
				       struct i915_gem_ww_ctx *ww)
{
	if (likely(intel_context_pin_if_active(ce)))
		return 0;

	return __intel_context_do_pin_ww(ce, ww);
}

static inline void __intel_context_pin(struct intel_context *ce)
{
	GEM_BUG_ON(!intel_context_is_pinned(ce));
	atomic_inc(&ce->pin_count);
}

void intel_context_unpin(struct intel_context *ce);
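
/*
 * Typical pairing (an illustrative sketch, not from the original source):
 * every successful intel_context_pin() must be balanced by a matching
 * intel_context_unpin() once the caller has finished with the context.
 *
 *	err = intel_context_pin(ce);
 *	if (err)
 *		return err;
 *
 *	... build and submit requests against ce ...
 *
 *	intel_context_unpin(ce);
 */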

void intel_context_enter_engine(struct intel_context *ce);
void intel_context_exit_engine(struct intel_context *ce);

static inline void intel_context_enter(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	if (!ce->active_count++)
		ce->ops->enter(ce);
}

static inline void intel_context_mark_active(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	++ce->active_count;
}

static inline void intel_context_exit(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	GEM_BUG_ON(!ce->active_count);
	if (!--ce->active_count)
		ce->ops->exit(ce);
}
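
/*
 * Note on pairing (commentary added here, not in the original): enter/exit
 * calls nest, so only the first intel_context_enter() and the last
 * intel_context_exit() invoke the ce->ops hooks, and every call must be
 * made under ce->timeline->mutex, e.g.:
 *
 *	mutex_lock(&ce->timeline->mutex);
 *	intel_context_enter(ce);
 *	... the context is now accounted as busy ...
 *	intel_context_exit(ce);
 *	mutex_unlock(&ce->timeline->mutex);
 */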

static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
	kref_get(&ce->ref);
	return ce;
}

static inline void intel_context_put(struct intel_context *ce)
{
	kref_put(&ce->ref, ce->ops->destroy);
}
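
/*
 * Reference counting sketch (illustrative only): intel_context_get()
 * returns the same pointer for call-chaining, and dropping the final
 * reference via intel_context_put() invokes ce->ops->destroy.
 *
 *	struct intel_context *ref = intel_context_get(ce);
 *
 *	... use ref independently of the original holder ...
 *
 *	intel_context_put(ref);
 */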

static inline struct intel_timeline *__must_check
intel_context_timeline_lock(struct intel_context *ce)
	__acquires(&ce->timeline->mutex)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	err = mutex_lock_interruptible(&tl->mutex);
	if (err)
		return ERR_PTR(err);

	return tl;
}

static inline void intel_context_timeline_unlock(struct intel_timeline *tl)
	__releases(&tl->mutex)
{
	mutex_unlock(&tl->mutex);
}
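
/*
 * Usage sketch (not from the original source): the lock helper returns
 * either the locked timeline or an ERR_PTR-encoded errno, so check it
 * with IS_ERR() before use.
 *
 *	struct intel_timeline *tl;
 *
 *	tl = intel_context_timeline_lock(ce);
 *	if (IS_ERR(tl))
 *		return PTR_ERR(tl);
 *
 *	... operate on the locked timeline ...
 *
 *	intel_context_timeline_unlock(tl);
 */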

int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq);

struct i915_request *intel_context_create_request(struct intel_context *ce);

static inline struct intel_ring *__intel_context_ring_size(u64 sz)
{
	return u64_to_ptr(struct intel_ring, sz);
}

static inline bool intel_context_is_barrier(const struct intel_context *ce)
{
	return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
}

static inline bool intel_context_is_closed(const struct intel_context *ce)
{
	return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
}

static inline bool intel_context_use_semaphores(const struct intel_context *ce)
{
	return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_set_use_semaphores(struct intel_context *ce)
{
	set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_clear_use_semaphores(struct intel_context *ce)
{
	clear_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline bool intel_context_is_banned(const struct intel_context *ce)
{
	return test_bit(CONTEXT_BANNED, &ce->flags);
}

static inline bool intel_context_set_banned(struct intel_context *ce)
{
	return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
}
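
/*
 * Note (added commentary): intel_context_set_banned() returns the
 * previous value of the flag, letting a caller detect whether it was the
 * first to ban the context, e.g.:
 *
 *	if (!intel_context_set_banned(ce))
 *		... first to ban: cancel outstanding work exactly once ...
 */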

static inline bool
intel_context_force_single_submission(const struct intel_context *ce)
{
	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline void
intel_context_set_single_submission(struct intel_context *ce)
{
	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline bool
intel_context_nopreempt(const struct intel_context *ce)
{
	return test_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_set_nopreempt(struct intel_context *ce)
{
	set_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_clear_nopreempt(struct intel_context *ce)
{
	clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline u64 intel_context_get_total_runtime_ns(struct intel_context *ce)
{
	const u32 period =
		RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns;

	return READ_ONCE(ce->runtime.total) * period;
}

static inline u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
{
	const u32 period =
		RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns;

	return mul_u32_u32(ewma_runtime_read(&ce->runtime.avg), period);
}
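
/*
 * Sampling sketch (illustrative): both helpers convert CS timestamp
 * ticks into nanoseconds using the platform's timestamp period, so they
 * may be sampled at any time without additional locking.
 *
 *	u64 total = intel_context_get_total_runtime_ns(ce);
 *	u64 avg = intel_context_get_avg_runtime_ns(ce);
 */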

#endif /* __INTEL_CONTEXT_H__ */