/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)	\
	container_of((sched_job), struct drm_sched_job, queue_node)
/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting jobs to a HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *              entity can be submitted
 * @num_sched_list: number of drm scheds in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note: the sched_list should have at least one element to schedule
 *       the entity
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	entity->last_scheduled = NULL;

	if (num_sched_list)
		entity->rq = &sched_list[0]->sched_rq[entity->priority];

	init_completion(&entity->entity_idle);

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
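
/*
 * Illustrative usage sketch, not compiled here: a driver typically inits one
 * entity per context against its scheduler(s). The scheduler instance
 * "my_sched" and the error label below are hypothetical names.
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &my_sched };
 *	struct drm_sched_entity entity;
 *	int r;
 *
 *	r = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
 *				  sched_list, ARRAY_SIZE(sched_list), NULL);
 *	if (r)
 *		goto err_free;
 */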

/**
 * drm_sched_entity_modify_sched - Modify sched of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *		the existing entity->sched_list
 * @num_sched_list: number of drm scheds in sched_list
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
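
/*
 * Illustrative sketch with hypothetical names: after picking a ring, a driver
 * can pin an entity to that ring's scheduler. The replacement list must stay
 * alive for as long as the entity uses it.
 *
 *	static struct drm_gpu_scheduler *pinned[1];
 *
 *	pinned[0] = &ring->sched;
 *	drm_sched_entity_modify_sched(entity, pinned, ARRAY_SIZE(pinned));
 */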

/**
 * drm_sched_entity_is_idle - Check if entity is idle
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity does not have any unscheduled jobs.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0 ||
	    entity->stopped)
		return true;

	return false;
}

/**
 * drm_sched_entity_is_ready - Check if entity is ready
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity could provide a job.
 */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait in jiffies for the queue to become empty
 *
 * drm_sched_entity_fini() is split into two functions; this first one does
 * the waiting, removes the entity from the runqueue and returns an error
 * when the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini, consume existing
	 * queued IBs or discard them on SIGKILL
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process, disable any further IB enqueue right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) {
		spin_lock(&entity->rq_lock);
		entity->stopped = true;
		drm_sched_rq_remove_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
	}

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);
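
/*
 * Illustrative teardown sketch, assuming a caller that cannot block forever:
 * flush with a bounded timeout, then kill whatever is left via
 * drm_sched_entity_fini() regardless of the result.
 *
 *	long left = drm_sched_entity_flush(entity, msecs_to_jiffies(1000));
 *
 *	if (!left)
 *		DRM_DEBUG("flush timed out, remaining jobs will be killed\n");
 *	drm_sched_entity_fini(entity);
 */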

/**
 * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
 *
 * @f: signaled fence
 * @cb: our callback structure
 *
 * Signal the scheduler finished fence when the entity in question is killed.
 */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	drm_sched_fence_finished(job->s_fence);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/**
 * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
 *
 * @entity: entity which is cleaned up
 *
 * Makes sure that all remaining jobs in an entity are killed before it is
 * destroyed.
 */
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	struct dma_fence *f;
	int r;

	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		/* Wait for all dependencies to avoid data corruptions */
		while ((f = job->sched->ops->dependency(job, entity)))
			dma_fence_wait(f, false);

		drm_sched_fence_scheduled(s_fence);
		dma_fence_set_error(&s_fence->finished, -ESRCH);

		/*
		 * When the pipeline is hung by an older entity, a new entity
		 * might not even have had a chance to submit its first job to
		 * the HW, so entity->last_scheduled will remain NULL.
		 */
		if (!entity->last_scheduled) {
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
			continue;
		}

		r = dma_fence_add_callback(entity->last_scheduled,
					   &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb);
		if (r == -ENOENT)
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		else if (r)
			DRM_ERROR("fence add callback failed (%d)\n", r);
	}
}

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * This should be called after drm_sched_entity_flush(). It goes over the
 * entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = NULL;

	if (entity->rq) {
		sched = entity->rq->sched;
		drm_sched_rq_remove_entity(entity->rq, entity);
	}

	/* Consumption of existing IBs wasn't completed. Forcefully
	 * remove them here.
	 */
	if (spsc_queue_count(&entity->job_queue)) {
		if (sched) {
			/*
			 * Wait for the thread to be idle to make sure it isn't
			 * processing this entity.
			 */
			wait_for_completion(&entity->entity_idle);
		}
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		drm_sched_entity_kill_jobs(entity);
	}

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = NULL;
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini()
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
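
/*
 * Illustrative sketch of the common case, with hypothetical driver types:
 * most drivers call drm_sched_entity_destroy() from their file-close path and
 * let it do the flush + fini sequence above.
 *
 *	static void my_driver_postclose(struct drm_device *dev,
 *					struct drm_file *file)
 *	{
 *		struct my_file_priv *fpriv = file->driver_priv;
 *
 *		drm_sched_entity_destroy(&fpriv->entity);
 *		kfree(fpriv);
 *	}
 */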

/**
 * drm_sched_entity_clear_dep - callback to clear the entity's dependency
 */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/**
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
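
/*
 * Illustrative sketch with a hypothetical context object: the new priority
 * takes effect when the entity next selects a runqueue, i.e. for jobs queued
 * from then on.
 *
 *	drm_sched_entity_set_priority(&ctx->entity, DRM_SCHED_PRIORITY_HIGH);
 */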

/**
 * drm_sched_entity_add_dependency_cb - add callback for the entity's dependency
 *
 * @entity: entity with dependency
 *
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity, we can ignore
		 * fences from ourself
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {
		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

/**
 * drm_sched_entity_pop_job - get a ready to be scheduled job from the entity
 *
 * @entity: entity to get the job from
 *
 * Process all dependencies and try to get one job from the entity's queue.
 */
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			sched->ops->dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* skip jobs from an entity marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}

/**
 * drm_sched_entity_select_rq - select a new rq for the entity
 *
 * @entity: scheduler entity
 *
 * Check all prerequisites and select a new rq for the entity for load
 * balancing.
 */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
		return;

	fence = READ_ONCE(entity->last_scheduled);
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->rq_lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? &sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}

	spin_unlock(&entity->rq_lock);
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 *
 * @sched_job: job to submit
 * @entity: scheduler entity
 *
 * Note: To guarantee that the order of insertion into the queue matches the
 * job's fence sequence number, this function should be called with
 * drm_sched_job_init() under a common lock.
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity)
{
	bool first;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(&entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}
		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		drm_sched_wakeup(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
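
/*
 * Illustrative submission sketch with hypothetical names: the job is bound to
 * the entity with drm_sched_job_init(), then pushed; both calls are made
 * under one common lock so queue order matches fence sequence numbers.
 *
 *	mutex_lock(&ctx->submit_lock);
 *	r = drm_sched_job_init(&job->base, &ctx->entity, ctx);
 *	if (!r)
 *		drm_sched_entity_push_job(&job->base, &ctx->entity);
 *	mutex_unlock(&ctx->submit_lock);
 */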