Lines Matching refs:sched (DRM GPU scheduler core, drivers/gpu/drm/scheduler/sched_main.c)
72 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched, in drm_sched_rq_init() argument
78 rq->sched = sched; in drm_sched_rq_init()
95 atomic_inc(&rq->sched->score); in drm_sched_rq_add_entity()
114 atomic_dec(&rq->sched->score); in drm_sched_rq_remove_entity()
176 struct drm_gpu_scheduler *sched = entity->rq->sched; in drm_sched_dependency_optimized() local
184 if (s_fence && s_fence->sched == sched) in drm_sched_dependency_optimized()
198 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched) in drm_sched_start_timeout() argument
200 if (sched->timeout != MAX_SCHEDULE_TIMEOUT && in drm_sched_start_timeout()
201 !list_empty(&sched->ring_mirror_list)) in drm_sched_start_timeout()
202 schedule_delayed_work(&sched->work_tdr, sched->timeout); in drm_sched_start_timeout()
212 void drm_sched_fault(struct drm_gpu_scheduler *sched) in drm_sched_fault() argument
214 mod_delayed_work(system_wq, &sched->work_tdr, 0); in drm_sched_fault()
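
drm_sched_fault() (lines 212-214 above) only re-arms the work_tdr delayed work with a zero delay, so the timeout handler runs immediately instead of waiting out sched->timeout. A minimal sketch of calling it from a hardware-fault interrupt, assuming a hypothetical driver; struct my_device and my_fault_irq() are illustrative, only drm_sched_fault() is the API shown in this listing.

    #include <linux/interrupt.h>
    #include <drm/gpu_scheduler.h>

    /* Hypothetical per-device structure; only the scheduler matters here. */
    struct my_device {
            struct drm_gpu_scheduler sched;
    };

    /* Error interrupt: the hardware already reported that a job faulted, so
     * kick the scheduler's timeout handling now rather than waiting for the
     * regular sched->timeout to expire. */
    static irqreturn_t my_fault_irq(int irq, void *data)
    {
            struct my_device *mdev = data;

            drm_sched_fault(&mdev->sched);
            return IRQ_HANDLED;
    }
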
230 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched) in drm_sched_suspend_timeout() argument
234 sched_timeout = sched->work_tdr.timer.expires; in drm_sched_suspend_timeout()
240 if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT) in drm_sched_suspend_timeout()
244 return sched->timeout; in drm_sched_suspend_timeout()
256 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched, in drm_sched_resume_timeout() argument
259 spin_lock(&sched->job_list_lock); in drm_sched_resume_timeout()
261 if (list_empty(&sched->ring_mirror_list)) in drm_sched_resume_timeout()
262 cancel_delayed_work(&sched->work_tdr); in drm_sched_resume_timeout()
264 mod_delayed_work(system_wq, &sched->work_tdr, remaining); in drm_sched_resume_timeout()
266 spin_unlock(&sched->job_list_lock); in drm_sched_resume_timeout()
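
drm_sched_suspend_timeout() (lines 230-244) detaches the timeout work and reports how much of the budget remains, and drm_sched_resume_timeout() (lines 256-266) re-arms it with that remainder as long as ring_mirror_list is not empty. A hedged sketch of the intended pairing around a driver-side hardware pause (preemption, power gating and the like); the my_ring_pause()/my_ring_unpause() wrappers are hypothetical.

    #include <drm/gpu_scheduler.h>

    /* Hypothetical: pause and unpause a ring around an operation that stalls
     * the hardware, without letting the stall eat into the running job's
     * timeout budget. */
    static unsigned long my_ring_pause(struct drm_gpu_scheduler *sched)
    {
            unsigned long remaining = drm_sched_suspend_timeout(sched);

            /* ... stop the hardware queue here (driver specific) ... */
            return remaining;
    }

    static void my_ring_unpause(struct drm_gpu_scheduler *sched,
                                unsigned long remaining)
    {
            /* ... restart the hardware queue here (driver specific) ... */
            drm_sched_resume_timeout(sched, remaining);
    }
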
272 struct drm_gpu_scheduler *sched = s_job->sched; in drm_sched_job_begin() local
274 spin_lock(&sched->job_list_lock); in drm_sched_job_begin()
275 list_add_tail(&s_job->node, &sched->ring_mirror_list); in drm_sched_job_begin()
276 drm_sched_start_timeout(sched); in drm_sched_job_begin()
277 spin_unlock(&sched->job_list_lock); in drm_sched_job_begin()
282 struct drm_gpu_scheduler *sched; in drm_sched_job_timedout() local
285 sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work); in drm_sched_job_timedout()
288 spin_lock(&sched->job_list_lock); in drm_sched_job_timedout()
289 job = list_first_entry_or_null(&sched->ring_mirror_list, in drm_sched_job_timedout()
299 spin_unlock(&sched->job_list_lock); in drm_sched_job_timedout()
301 job->sched->ops->timedout_job(job); in drm_sched_job_timedout()
307 if (sched->free_guilty) { in drm_sched_job_timedout()
308 job->sched->ops->free_job(job); in drm_sched_job_timedout()
309 sched->free_guilty = false; in drm_sched_job_timedout()
312 spin_unlock(&sched->job_list_lock); in drm_sched_job_timedout()
315 spin_lock(&sched->job_list_lock); in drm_sched_job_timedout()
316 drm_sched_start_timeout(sched); in drm_sched_job_timedout()
317 spin_unlock(&sched->job_list_lock); in drm_sched_job_timedout()
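
drm_sched_job_timedout() (lines 282-317) takes the oldest job off ring_mirror_list and hands it to the driver's timedout_job backend op; the conventional recovery sequence inside that callback is stop, blame, reset, resubmit, start, all of which appear further down in this listing. A sketch of such a callback for a hypothetical single-ring driver; my_gpu_reset() is an assumed driver hook, and in this kernel version timedout_job returns void.

    #include <drm/gpu_scheduler.h>

    /* Hypothetical driver hook that actually resets the hardware. */
    void my_gpu_reset(struct drm_gpu_scheduler *sched);

    /* Typical recovery flow in the timedout_job backend op: park the
     * scheduler thread and detach the done-callbacks, blame the guilty
     * context, reset the hardware, re-run the mirror list and unpark. */
    static void my_timedout_job(struct drm_sched_job *bad)
    {
            struct drm_gpu_scheduler *sched = bad->sched;

            drm_sched_stop(sched, bad);        /* parks sched->thread      */
            drm_sched_increase_karma(bad);     /* mark the guilty context  */

            my_gpu_reset(sched);               /* driver specific reset    */

            drm_sched_resubmit_jobs(sched);    /* re-run ring_mirror_list  */
            drm_sched_start(sched, true);      /* full recovery, unpark    */
    }
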
334 struct drm_gpu_scheduler *sched = bad->sched; in drm_sched_increase_karma() local
344 struct drm_sched_rq *rq = &sched->sched_rq[i]; in drm_sched_increase_karma()
351 bad->sched->hang_limit) in drm_sched_increase_karma()
377 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) in drm_sched_stop() argument
381 kthread_park(sched->thread); in drm_sched_stop()
390 if (bad && bad->sched == sched) in drm_sched_stop()
395 list_add(&bad->node, &sched->ring_mirror_list); in drm_sched_stop()
403 list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list, node) { in drm_sched_stop()
407 atomic_dec(&sched->hw_rq_count); in drm_sched_stop()
413 spin_lock(&sched->job_list_lock); in drm_sched_stop()
415 spin_unlock(&sched->job_list_lock); in drm_sched_stop()
431 sched->ops->free_job(s_job); in drm_sched_stop()
433 sched->free_guilty = true; in drm_sched_stop()
443 cancel_delayed_work(&sched->work_tdr); in drm_sched_stop()
455 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery) in drm_sched_start() argument
465 list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { in drm_sched_start()
468 atomic_inc(&sched->hw_rq_count); in drm_sched_start()
486 spin_lock(&sched->job_list_lock); in drm_sched_start()
487 drm_sched_start_timeout(sched); in drm_sched_start()
488 spin_unlock(&sched->job_list_lock); in drm_sched_start()
491 kthread_unpark(sched->thread); in drm_sched_start()
501 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched) in drm_sched_resubmit_jobs() argument
508 list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { in drm_sched_resubmit_jobs()
511 if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) { in drm_sched_resubmit_jobs()
520 fence = sched->ops->run_job(s_job); in drm_sched_resubmit_jobs()
552 struct drm_gpu_scheduler *sched; in drm_sched_job_init() local
558 sched = entity->rq->sched; in drm_sched_job_init()
560 job->sched = sched; in drm_sched_job_init()
562 job->s_priority = entity->rq - sched->sched_rq; in drm_sched_job_init()
566 job->id = atomic64_inc_return(&sched->job_id_count); in drm_sched_job_init()
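
drm_sched_job_init() (lines 552-566) binds the job to the scheduler behind the entity's current run queue, derives its priority from the run-queue index, assigns a job id and creates the scheduler fence; the actual hand-over then happens through drm_sched_entity_push_job(). A minimal submission sketch with the usual embedded-job layout; struct my_job and my_submit() are hypothetical, and note that in this kernel version push_job still takes the entity explicitly.

    #include <drm/gpu_scheduler.h>

    /* Hypothetical driver job with the scheduler job embedded first, so the
     * backend ops can container_of() back to it. */
    struct my_job {
            struct drm_sched_job base;
            /* ... command stream, BO list, etc. (driver specific) ... */
    };

    static int my_submit(struct my_job *job, struct drm_sched_entity *entity,
                         void *owner)
    {
            int ret;

            /* Sets job->base.sched from entity->rq->sched and allocates the
             * scheduler fence. */
            ret = drm_sched_job_init(&job->base, entity, owner);
            if (ret)
                    return ret;

            /* Queue the job on the entity; the scheduler calls ops->run_job()
             * once the entity is selected and its dependencies have signaled. */
            drm_sched_entity_push_job(&job->base, entity);

            return 0;
    }
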
593 static bool drm_sched_ready(struct drm_gpu_scheduler *sched) in drm_sched_ready() argument
595 return atomic_read(&sched->hw_rq_count) < in drm_sched_ready()
596 sched->hw_submission_limit; in drm_sched_ready()
605 void drm_sched_wakeup(struct drm_gpu_scheduler *sched) in drm_sched_wakeup() argument
607 if (drm_sched_ready(sched)) in drm_sched_wakeup()
608 wake_up_interruptible(&sched->wake_up_worker); in drm_sched_wakeup()
619 drm_sched_select_entity(struct drm_gpu_scheduler *sched) in drm_sched_select_entity() argument
624 if (!drm_sched_ready(sched)) in drm_sched_select_entity()
629 entity = drm_sched_rq_select_entity(&sched->sched_rq[i]); in drm_sched_select_entity()
649 struct drm_gpu_scheduler *sched = s_fence->sched; in drm_sched_process_job() local
651 atomic_dec(&sched->hw_rq_count); in drm_sched_process_job()
652 atomic_dec(&sched->score); in drm_sched_process_job()
659 wake_up_interruptible(&sched->wake_up_worker); in drm_sched_process_job()
671 drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) in drm_sched_get_cleanup_job() argument
679 if ((sched->timeout != MAX_SCHEDULE_TIMEOUT && in drm_sched_get_cleanup_job()
680 !cancel_delayed_work(&sched->work_tdr)) || in drm_sched_get_cleanup_job()
684 spin_lock(&sched->job_list_lock); in drm_sched_get_cleanup_job()
686 job = list_first_entry_or_null(&sched->ring_mirror_list, in drm_sched_get_cleanup_job()
695 drm_sched_start_timeout(sched); in drm_sched_get_cleanup_job()
698 spin_unlock(&sched->job_list_lock); in drm_sched_get_cleanup_job()
715 struct drm_gpu_scheduler *sched, *picked_sched = NULL; in drm_sched_pick_best() local
720 sched = sched_list[i]; in drm_sched_pick_best()
722 if (!sched->ready) { in drm_sched_pick_best()
724 sched->name); in drm_sched_pick_best()
728 num_score = atomic_read(&sched->score); in drm_sched_pick_best()
731 picked_sched = sched; in drm_sched_pick_best()
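
drm_sched_pick_best() (lines 715-731) skips schedulers that are not ready (warning with sched->name) and returns the one with the lowest score, which is how entities are spread across rings; the array it walks is normally the same one handed to drm_sched_entity_init(). A sketch assuming a hypothetical device with two identical copy rings; the list lives in the device structure because the entity keeps a pointer to it, and only the drm_sched_* calls are real API.

    #include <drm/gpu_scheduler.h>

    #define MY_NUM_COPY_RINGS 2

    /* Hypothetical device: two copy rings plus the scheduler list that
     * entities load-balance over. The list must outlive the entities. */
    struct my_device {
            struct drm_gpu_scheduler copy_sched[MY_NUM_COPY_RINGS];
            struct drm_gpu_scheduler *copy_sched_list[MY_NUM_COPY_RINGS];
    };

    static int my_copy_entity_init(struct my_device *mdev,
                                   struct drm_sched_entity *entity)
    {
            unsigned int i;

            for (i = 0; i < MY_NUM_COPY_RINGS; i++)
                    mdev->copy_sched_list[i] = &mdev->copy_sched[i];

            /* The entity remembers the whole list; drm_sched_pick_best()
             * picks the ready scheduler with the lowest score from it
             * whenever the entity selects a run queue for new work. */
            return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
                                         mdev->copy_sched_list,
                                         MY_NUM_COPY_RINGS, NULL);
    }
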
746 static bool drm_sched_blocked(struct drm_gpu_scheduler *sched) in drm_sched_blocked() argument
765 struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param; in drm_sched_main() local
777 wait_event_interruptible(sched->wake_up_worker, in drm_sched_main()
778 (cleanup_job = drm_sched_get_cleanup_job(sched)) || in drm_sched_main()
779 (!drm_sched_blocked(sched) && in drm_sched_main()
780 (entity = drm_sched_select_entity(sched))) || in drm_sched_main()
784 sched->ops->free_job(cleanup_job); in drm_sched_main()
786 drm_sched_start_timeout(sched); in drm_sched_main()
801 atomic_inc(&sched->hw_rq_count); in drm_sched_main()
805 fence = sched->ops->run_job(sched_job); in drm_sched_main()
825 wake_up(&sched->job_scheduled); in drm_sched_main()
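
In drm_sched_main() (lines 765-825) every selected job bumps hw_rq_count and is passed to ops->run_job(); the fence the driver returns then gets drm_sched_process_job() installed as its callback, which is what later drops hw_rq_count and score and wakes wake_up_worker (lines 649-659). A sketch of the driver side of that contract; my_ring_emit() and the struct my_job layout are assumptions, the drm_sched_* and dma_fence calls are not.

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <drm/gpu_scheduler.h>

    /* Same embedded layout as in the submission sketch above. */
    struct my_job {
            struct drm_sched_job base;
    };

    /* Hypothetical hardware submit: writes the job to the ring and returns
     * the hardware fence that signals on completion. */
    struct dma_fence *my_ring_emit(struct my_job *job);

    /* Backend op invoked from drm_sched_main() once per selected job. The
     * returned fence is what drm_sched_process_job() gets attached to. */
    static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
    {
            struct my_job *job = container_of(sched_job, struct my_job, base);

            return my_ring_emit(job);
    }

    /* Backend op invoked from the worker (or via free_guilty in the timeout
     * path) once the job is done and off ring_mirror_list. */
    static void my_free_job(struct drm_sched_job *sched_job)
    {
            struct my_job *job = container_of(sched_job, struct my_job, base);

            drm_sched_job_cleanup(sched_job);
            kfree(job);
    }
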
842 int drm_sched_init(struct drm_gpu_scheduler *sched, in drm_sched_init() argument
850 sched->ops = ops; in drm_sched_init()
851 sched->hw_submission_limit = hw_submission; in drm_sched_init()
852 sched->name = name; in drm_sched_init()
853 sched->timeout = timeout; in drm_sched_init()
854 sched->hang_limit = hang_limit; in drm_sched_init()
856 drm_sched_rq_init(sched, &sched->sched_rq[i]); in drm_sched_init()
858 init_waitqueue_head(&sched->wake_up_worker); in drm_sched_init()
859 init_waitqueue_head(&sched->job_scheduled); in drm_sched_init()
860 INIT_LIST_HEAD(&sched->ring_mirror_list); in drm_sched_init()
861 spin_lock_init(&sched->job_list_lock); in drm_sched_init()
862 atomic_set(&sched->hw_rq_count, 0); in drm_sched_init()
863 INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout); in drm_sched_init()
864 atomic_set(&sched->score, 0); in drm_sched_init()
865 atomic64_set(&sched->job_id_count, 0); in drm_sched_init()
868 sched->thread = kthread_run(drm_sched_main, sched, sched->name); in drm_sched_init()
869 if (IS_ERR(sched->thread)) { in drm_sched_init()
870 ret = PTR_ERR(sched->thread); in drm_sched_init()
871 sched->thread = NULL; in drm_sched_init()
876 sched->ready = true; in drm_sched_init()
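
drm_sched_init() (lines 842-876) copies the backend ops, submission limit, name, timeout and hang limit into the scheduler, initializes the run queues, wait queues, mirror list and work_tdr, spawns the drm_sched_main() kthread and marks the scheduler ready; drivers call it once per hardware ring. A sketch of bringing one ring up, assuming the hypothetical my_* callbacks from the earlier sketches live in the same file; the numeric limits are placeholders, not recommendations.

    #include <linux/jiffies.h>
    #include <drm/gpu_scheduler.h>

    /* Callbacks as sketched earlier in this section (hypothetical). */
    static struct dma_fence *my_run_job(struct drm_sched_job *sched_job);
    static void my_timedout_job(struct drm_sched_job *sched_job);
    static void my_free_job(struct drm_sched_job *sched_job);

    static const struct drm_sched_backend_ops my_sched_ops = {
            .run_job       = my_run_job,
            .timedout_job  = my_timedout_job,
            .free_job      = my_free_job,
    };

    static int my_ring_init(struct drm_gpu_scheduler *sched, const char *name)
    {
            /* Placeholder limits: 16 jobs on the hardware queue, one strike
             * before a context is marked guilty, 5 second timeout. */
            return drm_sched_init(sched, &my_sched_ops,
                                  16,                        /* hw_submission */
                                  1,                         /* hang_limit    */
                                  msecs_to_jiffies(5000),    /* timeout       */
                                  name);
    }
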
888 void drm_sched_fini(struct drm_gpu_scheduler *sched) in drm_sched_fini() argument
893 if (sched->thread) in drm_sched_fini()
894 kthread_stop(sched->thread); in drm_sched_fini()
897 struct drm_sched_rq *rq = &sched->sched_rq[i]; in drm_sched_fini()
915 wake_up_all(&sched->job_scheduled); in drm_sched_fini()
918 cancel_delayed_work_sync(&sched->work_tdr); in drm_sched_fini()
920 sched->ready = false; in drm_sched_fini()
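
drm_sched_fini() (lines 888-920) stops the kthread, walks the run queues, wakes anyone still waiting on job_scheduled, cancels work_tdr synchronously and clears sched->ready; it expects the driver to have stopped feeding it first. A short teardown sketch mirroring the init above; tearing entities down before their scheduler is the usual ordering, drm_sched_entity_destroy() is real API and my_ring_fini() is hypothetical.

    #include <drm/gpu_scheduler.h>

    /* Hypothetical unload path for a single-entity ring: flush and free the
     * entity first so nothing new reaches the run queues, then tear the
     * scheduler down (stops the kthread and cancels the timeout work). */
    static void my_ring_fini(struct drm_sched_entity *entity,
                             struct drm_gpu_scheduler *sched)
    {
            drm_sched_entity_destroy(entity);
            drm_sched_fini(sched);
    }
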