/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

struct drm_gpu_scheduler;
struct drm_sched_rq;

/* These are often used as an (initial) index
 * to an array, and as such should start at 0.
 */
enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH,
	DRM_SCHED_PRIORITY_KERNEL,

	DRM_SCHED_PRIORITY_COUNT,
	DRM_SCHED_PRIORITY_UNSET = -2
};
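
/*
 * Illustrative sketch (not part of the original header): because the
 * priority values start at 0 and DRM_SCHED_PRIORITY_COUNT closes the
 * range, they can index per-priority arrays directly. prio_stats and
 * count_job below are hypothetical names.
 *
 *	static atomic_t prio_stats[DRM_SCHED_PRIORITY_COUNT];
 *
 *	static void count_job(enum drm_sched_priority prio)
 *	{
 *		if (prio >= DRM_SCHED_PRIORITY_MIN &&
 *		    prio < DRM_SCHED_PRIORITY_COUNT)
 *			atomic_inc(&prio_stats[prio]);
 *	}
 */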

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * @list: used to append this struct to the list of entities in the
 *        runqueue.
 * @rq: runqueue on which this entity is currently scheduled.
 * @sched_list: A list of schedulers (drm_gpu_schedulers).
 *              Jobs from this entity can be scheduled on any scheduler
 *              on this list.
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
 * @priority: priority of the entity
 * @rq_lock: lock to modify the runqueue to which this entity belongs.
 * @job_queue: the list of jobs of this entity.
 * @fence_seq: a linearly increasing seqno incremented with each
 *             new &drm_sched_fence which is part of the entity.
 * @fence_context: a unique context for all the fences which belong
 *                 to this entity.
 *                 The &drm_sched_fence.scheduled uses the
 *                 fence_context but &drm_sched_fence.finished uses
 *                 fence_context + 1.
 * @dependency: the dependency fence of the job which is on the top
 *              of the job queue.
 * @cb: callback for the dependency fence above.
 * @guilty: points to ctx's guilty.
 * @last_scheduled: points to the finished fence of the last scheduled job.
 * @last_user: last group leader pushing a job into the entity.
 * @stopped: Marks the entity as removed from the rq and destined for
 *           termination.
 * @entity_idle: signalled when the entity is not in use.
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
	struct list_head		list;
	struct drm_sched_rq		*rq;
	struct drm_gpu_scheduler	**sched_list;
	unsigned int			num_sched_list;
	enum drm_sched_priority		priority;
	spinlock_t			rq_lock;

	struct spsc_queue		job_queue;

	atomic_t			fence_seq;
	uint64_t			fence_context;

	struct dma_fence		*dependency;
	struct dma_fence_cb		cb;
	atomic_t			*guilty;
	struct dma_fence		*last_scheduled;
	struct task_struct		*last_user;
	bool				stopped;
	struct completion		entity_idle;
};
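
/*
 * Illustrative sketch (not part of the original header): typical entity
 * setup and teardown for a driver's per-file context, using
 * drm_sched_entity_init()/drm_sched_entity_destroy() declared below.
 * The my_ctx and my_sched names are placeholders.
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &my_sched };
 *	int ret;
 *
 *	ret = drm_sched_entity_init(&my_ctx->entity,
 *				    DRM_SCHED_PRIORITY_NORMAL,
 *				    sched_list, ARRAY_SIZE(sched_list),
 *				    NULL);
 *	if (ret)
 *		return ret;
 *
 *	...
 *
 *	drm_sched_entity_destroy(&my_ctx->entity);
 */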

/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t			lock;
	struct drm_gpu_scheduler	*sched;
	struct list_head		entities;
	struct drm_sched_entity		*current_entity;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence		scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence		finished;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signalled.
	 */
	struct dma_fence		*parent;
	/**
	 * @sched: the scheduler instance to which the job having this
	 * struct belongs.
	 */
	struct drm_gpu_scheduler	*sched;
	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t			lock;
	/**
	 * @owner: job owner for debugging
	 */
	void				*owner;
};
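
/*
 * Illustrative sketch (not part of the original header): per the
 * @finished documentation above, a driver can use the scheduler fence
 * as the job's out fence right after drm_sched_job_init(), before
 * run_job() has produced a hardware fence. Wrapping it in a sync_file
 * is one common way to hand it to userspace.
 *
 *	struct dma_fence *out_fence = &job->s_fence->finished;
 *	struct sync_file *sync = sync_file_create(out_fence);
 */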

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of the job.
 * @finish_cb: the callback for the finished fence.
 * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(), and
 * should call drm_sched_entity_push_job() once it wants the scheduler
 * to schedule the job.
 */
struct drm_sched_job {
	struct spsc_node		queue_node;
	struct drm_gpu_scheduler	*sched;
	struct drm_sched_fence		*s_fence;
	struct dma_fence_cb		finish_cb;
	struct list_head		node;
	uint64_t			id;
	atomic_t			karma;
	enum drm_sched_priority		s_priority;
	struct drm_sched_entity		*entity;
	struct dma_fence_cb		cb;
};
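
/*
 * Illustrative sketch (not part of the original header): the typical
 * submission flow the comment above describes. my_job is a placeholder
 * driver structure embedding struct drm_sched_job as its base member.
 *
 *	int ret = drm_sched_job_init(&my_job->base, &my_ctx->entity, owner);
 *	if (ret)
 *		return ret;
 *
 *	out_fence = dma_fence_get(&my_job->base.s_fence->finished);
 *	drm_sched_entity_push_job(&my_job->base, &my_ctx->entity);
 *
 * The out fence (&s_fence->finished) is valid as soon as
 * drm_sched_job_init() returns, before run_job() has produced a
 * hardware fence.
 */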

static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return (s_job && atomic_inc_return(&s_job->karma) > threshold);
}
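
/*
 * Illustrative sketch (not part of the original header): a recovery
 * path might bump a suspect job's karma against the scheduler's hang
 * limit and kick it out of the entity's queue once it crosses the
 * threshold.
 *
 *	if (drm_sched_invalidate_job(s_job, sched->hang_limit))
 *		drm_sched_job_kickout(s_job);
 */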

/**
 * struct drm_sched_backend_ops
 *
 * Define the backend operations called by the scheduler;
 * these functions should be implemented by the driver.
 */
struct drm_sched_backend_ops {
	/**
	 * @dependency: Called when the scheduler is considering scheduling
	 * this job next, to get another struct dma_fence for this job to
	 * block on.  Once it returns NULL, run_job() may be called.
	 */
	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
					struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved.  This may be called multiple times, if
	 * timedout_job() has happened and drm_sched_job_recovery()
	 * decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 */
	void (*timedout_job)(struct drm_sched_job *sched_job);

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};
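
/*
 * Illustrative sketch (not part of the original header): a minimal ops
 * table. All my_* names are placeholders; my_hw_submit stands in for
 * whatever hands the job to the hardware ring and returns the hardware
 * fence that signals on completion.
 *
 *	static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
 *	{
 *		struct my_job *job = to_my_job(sched_job);
 *
 *		return my_hw_submit(job);
 *	}
 *
 *	static void my_free_job(struct drm_sched_job *sched_job)
 *	{
 *		drm_sched_job_cleanup(sched_job);
 *		kfree(to_my_job(sched_job));
 *	}
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.run_job	= my_run_job,
 *		.timedout_job	= my_timedout_job,
 *		.free_job	= my_free_job,
 *	};
 */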

/**
 * struct drm_gpu_scheduler
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority-wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @ring_mirror_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the ring_mirror_list.
 * @hang_limit: once the hangs by a job cross this limit then it is marked
 *              guilty and will not be considered for scheduling further.
 * @score: score to help the load balancer pick an idle sched.
 * @ready: marks if the underlying HW is ready to work.
 * @free_guilty: a hint to the timeout handler to free the guilty job.
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops	*ops;
	uint32_t			hw_submission_limit;
	long				timeout;
	const char			*name;
	struct drm_sched_rq		sched_rq[DRM_SCHED_PRIORITY_COUNT];
	wait_queue_head_t		wake_up_worker;
	wait_queue_head_t		job_scheduled;
	atomic_t			hw_rq_count;
	atomic64_t			job_id_count;
	struct delayed_work		work_tdr;
	struct task_struct		*thread;
	struct list_head		ring_mirror_list;
	spinlock_t			job_list_lock;
	int				hang_limit;
	atomic_t			score;
	bool				ready;
	bool				free_guilty;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit, long timeout,
		   const char *name);
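
/*
 * Illustrative sketch (not part of the original header): bringing up one
 * scheduler per hardware ring, as described above. The hardware queue
 * depth, hang limit and timeout values are placeholder choices, and
 * my_sched_ops/ring are hypothetical driver names.
 *
 *	int ret = drm_sched_init(&ring->sched, &my_sched_ops,
 *				 64, 2, msecs_to_jiffies(500),
 *				 "my-ring0");
 *	if (ret)
 *		return ret;
 *
 *	...
 *
 *	drm_sched_fini(&ring->sched);
 */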

void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner);
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list);

void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity);

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);

struct drm_sched_fence *drm_sched_fence_create(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list);

#endif