/*
 * Copyright © 2008-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_REQUEST_H
#define I915_REQUEST_H

#include <linux/dma-fence.h>
#include <linux/irq_work.h>
#include <linux/lockdep.h>

#include "gem/i915_gem_context_types.h"
#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_timeline_types.h"

#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"

#include <uapi/drm/i915_drm.h>

struct drm_file;
struct drm_i915_gem_object;
struct i915_request;

struct i915_capture_list {
	struct i915_capture_list *next;
	struct i915_vma *vma;
};

#define RQ_TRACE(rq, fmt, ...) do {					\
	const struct i915_request *rq__ = (rq);				\
	ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt,	\
		     rq__->fence.context, rq__->fence.seqno,		\
		     hwsp_seqno(rq__), ##__VA_ARGS__);			\
} while (0)
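
/*
 * Example (illustrative sketch, not part of the original header): RQ_TRACE
 * prepends the request's fence context:seqno and the current HWSP value to
 * an ENGINE_TRACE message, so callers only supply the extra detail:
 *
 *	RQ_TRACE(rq, "semaphore yield: %08x\n", rq->sched.semaphores);
 *
 * which produces an engine-tagged trace line along the lines of
 * "fence 23:5, current 4 semaphore yield: 00000001".
 */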

enum {
	/*
	 * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
	 *
	 * Set by __i915_request_submit() on handing over to HW, and cleared
	 * by __i915_request_unsubmit() if we preempt this request.
	 *
	 * Finally cleared for consistency on retiring the request, when
	 * we know the HW is no longer running this request.
	 *
	 * See i915_request_is_active()
	 */
	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,

	/*
	 * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
	 *
	 * Using the scheduler, when a request is ready for execution it is put
	 * into the priority queue, and removed from that queue when transferred
	 * to the HW runlists. We want to track its membership within the
	 * priority queue so that we can easily check before rescheduling.
	 *
	 * See i915_request_in_priority_queue()
	 */
	I915_FENCE_FLAG_PQUEUE,

	/*
	 * I915_FENCE_FLAG_HOLD - this request is currently on hold
	 *
	 * This request has been suspended, pending an ongoing investigation.
	 */
	I915_FENCE_FLAG_HOLD,

	/*
	 * I915_FENCE_FLAG_INITIAL_BREADCRUMB - this request has the initial
	 * breadcrumb that marks the end of semaphore waits and start of the
	 * user payload.
	 */
	I915_FENCE_FLAG_INITIAL_BREADCRUMB,

	/*
	 * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
	 *
	 * Internal bookkeeping used by the breadcrumb code to track when
	 * a request is on the various signal_list.
	 */
	I915_FENCE_FLAG_SIGNAL,

	/*
	 * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
	 *
	 * The execution of some requests should not be interrupted. This is
	 * a sensitive operation as it makes the request super important,
	 * blocking other higher priority work. Abuse of this flag will
	 * lead to quality of service issues.
	 */
	I915_FENCE_FLAG_NOPREEMPT,

	/*
	 * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue
	 *
	 * A high priority sentinel request may be submitted to clear the
	 * submission queue. As it will be the only request in-flight, upon
	 * execution all other active requests will have been preempted and
	 * unsubmitted. This preemptive pulse is used to re-evaluate the
	 * in-flight requests, particularly in cases where an active context
	 * is banned and those active requests need to be cancelled.
	 */
	I915_FENCE_FLAG_SENTINEL,

	/*
	 * I915_FENCE_FLAG_BOOST - upclock the gpu for this request
	 *
	 * Some requests are more important than others! In particular, a
	 * request that the user is waiting on is typically required for
	 * interactive latency, which we want to minimise by upclocking
	 * the GPU. Here we track such boost requests on a per-request basis.
	 */
	I915_FENCE_FLAG_BOOST,
};

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct i915_request {
	struct dma_fence fence;
	spinlock_t lock;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct intel_engine_cs *engine;
	struct intel_context *context;
	struct intel_ring *ring;
	struct intel_timeline __rcu *timeline;

	struct list_head signal_link;
	struct llist_node signal_node;

	/*
	 * The rcu epoch of when this request was allocated. Used to judiciously
	 * apply backpressure on future allocations to ensure that under
	 * mempressure there are sufficient RCU ticks for us to reclaim our
	 * RCU protected slabs.
	 */
	unsigned long rcustate;

	/*
	 * We pin the timeline->mutex while constructing the request to
	 * ensure that no caller accidentally drops it during construction.
	 * The timeline->mutex must be held to ensure that only this caller
	 * can use the ring and manipulate the associated timeline during
	 * construction.
	 */
	struct pin_cookie cookie;

	/*
	 * Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 */
	struct i915_sw_fence submit;
	union {
		wait_queue_entry_t submitq;
		struct i915_sw_dma_fence_cb dmaq;
		struct i915_request_duration_cb {
			struct dma_fence_cb cb;
			ktime_t emitted;
		} duration;
	};
	struct llist_head execute_cb;
	struct i915_sw_fence semaphore;

	/*
	 * A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_sched_node sched;
	struct i915_dependency dep;
	intel_engine_mask_t execution_mask;

	/*
	 * A convenience pointer to the current breadcrumb value stored in
	 * the HW status page (or our timeline's local equivalent). The full
	 * path would be rq->context->timeline->hwsp_seqno.
	 */
	const u32 *hwsp_seqno;

	/*
	 * If we need to access the timeline's seqno for this request in
	 * another request, we need to keep a read reference to this associated
	 * cacheline, so that we do not free and recycle it before the foreign
	 * observers have completed. Hence, we keep a pointer to the cacheline
	 * inside the timeline's HWSP vma, but it is only valid while this
	 * request has not completed and guarded by the timeline mutex.
	 */
	struct intel_timeline_cacheline __rcu *hwsp_cacheline;

	/** Position in the ring of the start of the request */
	u32 head;

	/** Position in the ring of the start of the user packets */
	u32 infix;

	/**
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocated space in the ring for emitting the request */
	u32 reserved_space;

	/**
	 * Batch buffer related to this request if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;

	/**
	 * Additional buffers requested by userspace to be captured upon
	 * a GPU hang. The vma/obj on this list are protected by their
	 * active reference - all objects on this list must also be
	 * on the active_list (of their final request).
	 */
	struct i915_capture_list *capture_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** timeline->request entry for this request */
	struct list_head link;

	I915_SELFTEST_DECLARE(struct {
		struct list_head link;
		unsigned long delay;
	} mock;)
};

#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct kmem_cache *i915_request_slab_cache(void);

struct i915_request * __must_check
__i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);
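
/*
 * Example (illustrative sketch, not part of the original header): the
 * typical lifecycle is to create a request on a pinned context, emit
 * commands into its ring, then commit it with i915_request_add(). The
 * emit_payload() helper below is hypothetical; real callers emit via
 * intel_ring_begin() and friends.
 *
 *	struct i915_request *rq;
 *
 *	rq = i915_request_create(ce);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	err = emit_payload(rq);	// hypothetical command emission
 *	i915_request_add(rq);	// commit; the scheduler takes over
 */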

void i915_request_set_error_once(struct i915_request *rq, int error);
void __i915_request_skip(struct i915_request *rq);

struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
			  const struct i915_sched_attr *attr);

bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);

static inline struct i915_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct i915_request, fence);
}

static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
	return to_request(dma_fence_get(&rq->fence));
}

static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
	return to_request(dma_fence_get_rcu(&rq->fence));
}

static inline void
i915_request_put(struct i915_request *rq)
{
	dma_fence_put(&rq->fence);
}

int i915_request_await_object(struct i915_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
				 struct dma_fence *fence);
int i915_request_await_execution(struct i915_request *rq,
				 struct dma_fence *fence,
				 void (*hook)(struct i915_request *rq,
					      struct dma_fence *signal));
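
/*
 * Example (illustrative sketch, not part of the original header): ordering
 * a request after an external fence. Awaits must be queued while the
 * request is still under construction, i.e. before i915_request_add();
 * the request will then not be submitted to HW until the awaited fence has
 * signaled. in_fence here is assumed to be supplied by the caller.
 *
 *	err = i915_request_await_dma_fence(rq, in_fence);
 *	if (err < 0)
 *		i915_request_set_error_once(rq, err);
 *	i915_request_add(rq);
 */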

void i915_request_add(struct i915_request *rq);

bool __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);

void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);

long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE	BIT(0)
#define I915_WAIT_PRIORITY	BIT(1) /* small priority bump for the request */
#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
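
/*
 * Example (illustrative sketch, not part of the original header): waiting
 * on a request outside the construction locks. Hold a reference across the
 * wait so the request cannot be retired and freed underneath us; a negative
 * return is an error (e.g. -EINTR), otherwise the remaining timeout in
 * jiffies is returned.
 *
 *	i915_request_get(rq);
 *	ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *				MAX_SCHEDULE_TIMEOUT);
 *	i915_request_put(rq);
 *	if (ret < 0)
 *		return ret;
 */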

static inline bool i915_request_signaled(const struct i915_request *rq)
{
	/* The request may live longer than its HWSP, so check flags first! */
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
}

static inline bool i915_request_is_active(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}

static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}

static inline bool
i915_request_has_initial_breadcrumb(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
}

/**
 * Returns true if seq1 is later than or equal to seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
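
/*
 * Worked example (illustrative, not part of the original header): the
 * signed subtraction makes the comparison robust against u32 wraparound,
 * provided the two seqnos are within 2^31 of each other. With seq1 = 3
 * just after the counter wrapped and seq2 = 0xfffffffe from just before:
 *
 *	seq1 - seq2 = 3 - 0xfffffffe = 5 (mod 2^32)
 *
 * (s32)5 >= 0, so seq1 correctly reads as "later", even though seq1 < seq2
 * as plain unsigned values.
 */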

static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
	const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);

	return READ_ONCE(*hwsp);
}

/**
 * hwsp_seqno - the current breadcrumb value in the HW status page
 * @rq: the request, to chase the relevant HW status page
 *
 * The emphasis in naming here is that hwsp_seqno() is not a property of the
 * request, but an indication of the current HW state (associated with this
 * request). Its value will change as the GPU executes more requests.
 *
 * Returns the current breadcrumb value in the associated HW status page (or
 * the local timeline's equivalent) for this request. The request itself
 * has the associated breadcrumb value of rq->fence.seqno; when the HW
 * status page shows that breadcrumb or later, this request is complete.
 */
static inline u32 hwsp_seqno(const struct i915_request *rq)
{
	u32 seqno;

	rcu_read_lock(); /* the HWSP may be freed at runtime */
	seqno = __hwsp_seqno(rq);
	rcu_read_unlock();

	return seqno;
}

static inline bool __i915_request_has_started(const struct i915_request *rq)
{
	/*
	 * The previous request on this timeline wrote the breadcrumb
	 * rq->fence.seqno - 1; once the HWSP has passed that value, our
	 * own payload (or its preamble) has begun executing.
	 */
	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
}

/**
 * i915_request_started - check if the request has begun being executed
 * @rq: the request
 *
 * If the timeline is not using initial breadcrumbs, a request is
 * considered started if the previous request on its timeline (i.e.
 * context) has been signaled.
 *
 * If the timeline is using semaphores, it will also be emitting an
 * "initial breadcrumb" after the semaphores are complete and just before
 * it began executing the user payload. A request can therefore be active
 * on the HW and not yet started as it is still busywaiting on its
 * dependencies (via HW semaphores).
 *
 * If the request has started, its dependencies will have been signaled
 * (either by fences or by semaphores) and it will have begun processing
 * the user payload.
 *
 * However, even if a request has started, it may have been preempted and
 * so no longer active, or it may have already completed.
 *
 * See also i915_request_is_active().
 *
 * Returns true if the request has begun executing the user payload, or
 * has completed.
 */
static inline bool i915_request_started(const struct i915_request *rq)
{
	bool result;

	if (i915_request_signaled(rq))
		return true;

	result = true;
	rcu_read_lock(); /* the HWSP may be freed at runtime */
	if (likely(!i915_request_signaled(rq)))
		/* Remember: started but may have since been preempted! */
		result = __i915_request_has_started(rq);
	rcu_read_unlock();

	return result;
}

/**
 * i915_request_is_running - check if the request may actually be executing
 * @rq: the request
 *
 * Returns true if the request is currently submitted to hardware and has
 * passed its start point (i.e. the context is set up and it is not
 * busywaiting). Note that it may no longer be running by the time the
 * function returns!
 */
static inline bool i915_request_is_running(const struct i915_request *rq)
{
	bool result;

	if (!i915_request_is_active(rq))
		return false;

	rcu_read_lock();
	result = __i915_request_has_started(rq) && i915_request_is_active(rq);
	rcu_read_unlock();

	return result;
}

/**
 * i915_request_is_ready - check if the request is ready for execution
 * @rq: the request
 *
 * Upon construction, the request is instructed to wait upon various
 * signals before it is ready to be executed by the HW. That is, we do
 * not want to start execution and read data before it is written. In practice,
 * this is controlled with a mixture of interrupts and semaphores. Once
 * the submit fence is completed, the backend scheduler will place the
 * request into its queue and from there submit it for execution. So we
 * can detect when a request is eligible for execution (and is under control
 * of the scheduler) by querying where it is in any of the scheduler's lists.
 *
 * Returns true if the request is ready for execution (it may be inflight),
 * false otherwise.
 */
static inline bool i915_request_is_ready(const struct i915_request *rq)
{
	return !list_empty(&rq->sched.link);
}

static inline bool __i915_request_is_complete(const struct i915_request *rq)
{
	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
	bool result;

	if (i915_request_signaled(rq))
		return true;

	result = true;
	rcu_read_lock(); /* the HWSP may be freed at runtime */
	if (likely(!i915_request_signaled(rq)))
		result = __i915_request_is_complete(rq);
	rcu_read_unlock();

	return result;
}
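
/*
 * Example (illustrative sketch, not part of the original header): a bounded
 * completion poll, as might appear in a selftest. Production code should
 * sleep via i915_request_wait() instead of spinning.
 *
 *	unsigned long timeout = jiffies + HZ;
 *
 *	while (!i915_request_completed(rq)) {
 *		if (time_after(jiffies, timeout))
 *			return -ETIME;
 *		cpu_relax();
 *	}
 */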

static inline void i915_request_mark_complete(struct i915_request *rq)
{
	WRITE_ONCE(rq->hwsp_seqno, /* decouple from HWSP */
		   (u32 *)&rq->fence.seqno);
}

static inline bool i915_request_has_waitboost(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
}

static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
{
	/* Preemption should only be disabled very rarely */
	return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
}

static inline bool i915_request_has_sentinel(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
}

static inline bool i915_request_on_hold(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
}

static inline void i915_request_set_hold(struct i915_request *rq)
{
	set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline void i915_request_clear_hold(struct i915_request *rq)
{
	clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline struct intel_timeline *
i915_request_timeline(const struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->timeline,
		lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
}

static inline struct i915_gem_context *
i915_request_gem_context(const struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->context->gem_context, true);
}

static inline struct intel_timeline *
i915_request_active_timeline(const struct i915_request *rq)
{
	/*
	 * When in use during submission, we are protected by a guarantee that
	 * the context/timeline is pinned and must remain pinned until after
	 * this submission.
	 */
	return rcu_dereference_protected(rq->timeline,
					 lockdep_is_held(&rq->engine->active.lock));
}

#endif /* I915_REQUEST_H */