Lines Matching full:scheduler
151 void rga_job_scheduler_dump_info(struct rga_scheduler_t *scheduler) in rga_job_scheduler_dump_info() argument
155 lockdep_assert_held(&scheduler->irq_lock); in rga_job_scheduler_dump_info()
159 dev_driver_string(scheduler->dev), in rga_job_scheduler_dump_info()
160 scheduler->core, scheduler->job_count, scheduler->status); in rga_job_scheduler_dump_info()
162 if (scheduler->running_job) in rga_job_scheduler_dump_info()
163 rga_job_dump_info(scheduler->running_job); in rga_job_scheduler_dump_info()
165 list_for_each_entry(job_pos, &scheduler->todo_list, head) { in rga_job_scheduler_dump_info()
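Taken together, the matches above outline a dump helper that must run with irq_lock held. Only lines containing "scheduler" are listed, so the gaps are inferred: the sketch below assumes the driver's own types (struct rga_scheduler_t, struct rga_job, rga_job_dump_info()) plus the usual <linux/spinlock.h>, <linux/list.h>, <linux/device.h>, <linux/printk.h>; the pr_info format string and the job_pos iterator are guesses, not verbatim source. Later sketches assume the same context.

void rga_job_scheduler_dump_info(struct rga_scheduler_t *scheduler)
{
        struct rga_job *job_pos;

        /* The caller must already hold irq_lock; lockdep enforces it. */
        lockdep_assert_held(&scheduler->irq_lock);

        /* Assumed format string: the listing only shows the arguments. */
        pr_info("%s core[0x%x]: job_count = %d, status = %d\n",
                dev_driver_string(scheduler->dev),
                scheduler->core, scheduler->job_count, scheduler->status);

        /* The job currently on the hardware, if any. */
        if (scheduler->running_job)
                rga_job_dump_info(scheduler->running_job);

        /* Every job still waiting in the software queue. */
        list_for_each_entry(job_pos, &scheduler->todo_list, head)
                rga_job_dump_info(job_pos);
}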
172 static int rga_job_run(struct rga_job *job, struct rga_scheduler_t *scheduler) in rga_job_run() argument
177 ret = rga_power_enable(scheduler); in rga_job_run()
183 ret = scheduler->ops->set_reg(job, scheduler); in rga_job_run()
186 rga_power_disable(scheduler); in rga_job_run()
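The rga_job_run() matches show a power-bracketing pattern: take a runtime power reference, program the core through its per-core ops table, and drop the reference again if programming fails. A minimal sketch under the same assumptions; the error handling between the listed lines is inferred:

static int rga_job_run(struct rga_job *job, struct rga_scheduler_t *scheduler)
{
        int ret;

        /* Make sure clocks/power domains are up before touching registers. */
        ret = rga_power_enable(scheduler);
        if (ret < 0)
                return ret;

        /* Program the hardware through the per-core ops table. */
        ret = scheduler->ops->set_reg(job, scheduler);
        if (ret < 0) {
                /* Balance the enable above on failure. */
                rga_power_disable(scheduler);
                return ret;
        }

        return 0;
}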
199 void rga_job_next(struct rga_scheduler_t *scheduler) in rga_job_next() argument
206 spin_lock_irqsave(&scheduler->irq_lock, flags); in rga_job_next()
208 if (scheduler->running_job || in rga_job_next()
209 list_empty(&scheduler->todo_list)) { in rga_job_next()
210 spin_unlock_irqrestore(&scheduler->irq_lock, flags); in rga_job_next()
214 job = list_first_entry(&scheduler->todo_list, struct rga_job, head); in rga_job_next()
218 scheduler->job_count--; in rga_job_next()
220 scheduler->running_job = job; in rga_job_next()
224 spin_unlock_irqrestore(&scheduler->irq_lock, flags); in rga_job_next()
226 ret = rga_job_run(job, scheduler); in rga_job_next()
231 spin_lock_irqsave(&scheduler->irq_lock, flags); in rga_job_next()
233 scheduler->running_job = NULL; in rga_job_next()
236 spin_unlock_irqrestore(&scheduler->irq_lock, flags); in rga_job_next()
239 rga_request_release_signal(scheduler, job); in rga_job_next()
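rga_job_next() is the dispatch step: with irq_lock held it pops the queue head and marks it running, then starts the hardware outside the lock. The list_del_init() call (standing in for unlisted source line 215) and the failure path's exact bookkeeping are assumptions:

void rga_job_next(struct rga_scheduler_t *scheduler)
{
        struct rga_job *job;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&scheduler->irq_lock, flags);

        /* Nothing to do while the core is busy or the queue is empty. */
        if (scheduler->running_job || list_empty(&scheduler->todo_list)) {
                spin_unlock_irqrestore(&scheduler->irq_lock, flags);
                return;
        }

        /* Pop the head of the queue and mark it as the running job. */
        job = list_first_entry(&scheduler->todo_list, struct rga_job, head);
        list_del_init(&job->head);      /* assumed: unlisted line 215 */
        scheduler->job_count--;
        scheduler->running_job = job;

        spin_unlock_irqrestore(&scheduler->irq_lock, flags);

        /* Start the hardware outside the spinlock. */
        ret = rga_job_run(job, scheduler);
        if (ret < 0) {
                /* On failure, detach the job and complete it with an error. */
                spin_lock_irqsave(&scheduler->irq_lock, flags);
                scheduler->running_job = NULL;
                spin_unlock_irqrestore(&scheduler->irq_lock, flags);

                rga_request_release_signal(scheduler, job);
        }
}

Calling rga_job_run() outside the spinlock presumably keeps irq_lock hold times short: the lock guards only the queue and running_job bookkeeping, not the register programming itself.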
247 struct rga_job *rga_job_done(struct rga_scheduler_t *scheduler) in rga_job_done() argument
253 spin_lock_irqsave(&scheduler->irq_lock, flags); in rga_job_done()
255 job = scheduler->running_job; in rga_job_done()
257 pr_err("core[0x%x] running job has been cleanup.\n", scheduler->core); in rga_job_done()
259 spin_unlock_irqrestore(&scheduler->irq_lock, flags); in rga_job_done()
262 scheduler->running_job = NULL; in rga_job_done()
264 scheduler->timer.busy_time += ktime_us_delta(now, job->hw_recoder_time); in rga_job_done()
267 spin_unlock_irqrestore(&scheduler->irq_lock, flags); in rga_job_done()
269 if (scheduler->ops->read_back_reg) in rga_job_done()
270 scheduler->ops->read_back_reg(job, scheduler); in rga_job_done()
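rga_job_done() is the interrupt-side counterpart: it detaches the running job, charges the elapsed time to the core's busy-time counter, and optionally reads registers back. The now = ktime_get() initialization, the early-return value, and the final return of the job are inferred from the listed ktime_us_delta() call and the function's signature:

struct rga_job *rga_job_done(struct rga_scheduler_t *scheduler)
{
        struct rga_job *job;
        unsigned long flags;
        ktime_t now = ktime_get();      /* assumed initialization */

        spin_lock_irqsave(&scheduler->irq_lock, flags);

        job = scheduler->running_job;
        if (job == NULL) {
                pr_err("core[0x%x] running job has been cleaned up.\n",
                       scheduler->core);
                spin_unlock_irqrestore(&scheduler->irq_lock, flags);
                return NULL;
        }
        scheduler->running_job = NULL;

        /* Account the time this job kept the hardware busy. */
        scheduler->timer.busy_time += ktime_us_delta(now, job->hw_recoder_time);

        spin_unlock_irqrestore(&scheduler->irq_lock, flags);

        /* Optionally read registers back for debugging/validation. */
        if (scheduler->ops->read_back_reg)
                scheduler->ops->read_back_reg(job, scheduler);

        return job;
}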
286 static void rga_job_scheduler_timeout_clean(struct rga_scheduler_t *scheduler) in rga_job_scheduler_timeout_clean() argument
291 spin_lock_irqsave(&scheduler->irq_lock, flags); in rga_job_scheduler_timeout_clean()
293 if (scheduler->running_job == NULL || scheduler->running_job->hw_running_time == 0) { in rga_job_scheduler_timeout_clean()
294 spin_unlock_irqrestore(&scheduler->irq_lock, flags); in rga_job_scheduler_timeout_clean()
298 job = scheduler->running_job; in rga_job_scheduler_timeout_clean()
300 scheduler->running_job = NULL; in rga_job_scheduler_timeout_clean()
301 scheduler->status = RGA_SCHEDULER_ABORT; in rga_job_scheduler_timeout_clean()
302 scheduler->ops->soft_reset(scheduler); in rga_job_scheduler_timeout_clean()
304 spin_unlock_irqrestore(&scheduler->irq_lock, flags); in rga_job_scheduler_timeout_clean()
309 rga_request_release_signal(scheduler, job); in rga_job_scheduler_timeout_clean()
311 rga_power_disable(scheduler); in rga_job_scheduler_timeout_clean()
313 spin_unlock_irqrestore(&scheduler->irq_lock, flags); in rga_job_scheduler_timeout_clean()
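The two distinct unlock sites in the matches (source lines 304 and 313) imply a branch: only a job that has genuinely timed out gets detached, the core reset, and the job completed with its power reference dropped; otherwise the function just unlocks and leaves. The timeout comparison and the RGA_JOB_TIMEOUT_MS constant below are hypothetical stand-ins for the unlisted check:

static void rga_job_scheduler_timeout_clean(struct rga_scheduler_t *scheduler)
{
        unsigned long flags;
        struct rga_job *job;

        spin_lock_irqsave(&scheduler->irq_lock, flags);

        /* Nothing running, or the job never actually reached the hardware. */
        if (scheduler->running_job == NULL ||
            scheduler->running_job->hw_running_time == 0) {
                spin_unlock_irqrestore(&scheduler->irq_lock, flags);
                return;
        }

        job = scheduler->running_job;

        /* Hypothetical timeout check standing in for the unlisted lines. */
        if (ktime_ms_delta(ktime_get(), job->hw_running_time) >= RGA_JOB_TIMEOUT_MS) {
                /* Detach the stuck job, abort the core and reset it. */
                scheduler->running_job = NULL;
                scheduler->status = RGA_SCHEDULER_ABORT;
                scheduler->ops->soft_reset(scheduler);

                spin_unlock_irqrestore(&scheduler->irq_lock, flags);

                /* Complete the job with an error and drop its power reference. */
                rga_request_release_signal(scheduler, job);
                rga_power_disable(scheduler);

                return;
        }

        spin_unlock_irqrestore(&scheduler->irq_lock, flags);
}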
322 struct rga_scheduler_t *scheduler = job->scheduler; in rga_job_insert_todo_list() local
324 spin_lock_irqsave(&scheduler->irq_lock, flags); in rga_job_insert_todo_list()
327 if (list_empty(&scheduler->todo_list) in rga_job_insert_todo_list()
329 list_add_tail(&job->head, &scheduler->todo_list); in rga_job_insert_todo_list()
331 list_for_each_entry(job_pos, &scheduler->todo_list, head) { in rga_job_insert_todo_list()
347 list_add_tail(&job->head, &scheduler->todo_list); in rga_job_insert_todo_list()
350 scheduler->job_count++; in rga_job_insert_todo_list()
353 spin_unlock_irqrestore(&scheduler->irq_lock, flags); in rga_job_insert_todo_list()
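rga_job_insert_todo_list() queues a job under irq_lock. The matches show an empty-list fast path, a list walk, and a tail-insert fallback, which is the shape of a priority-ordered insert. The priority field, the second condition of the fast path (the unlisted continuation of source line 327), and the exact comparison are assumptions:

static void rga_job_insert_todo_list(struct rga_job *job)
{
        bool first_match = false;
        unsigned long flags;
        struct rga_job *job_pos;
        struct rga_scheduler_t *scheduler = job->scheduler;

        spin_lock_irqsave(&scheduler->irq_lock, flags);

        /* Fast path: empty queue (the unlisted second condition is assumed
         * to be a default-priority check). */
        if (list_empty(&scheduler->todo_list) || job->priority == 0) {
                list_add_tail(&job->head, &scheduler->todo_list);
        } else {
                /* Assumed ordering: insert before the first queued job with
                 * a lower priority, keeping FIFO order among equals. */
                list_for_each_entry(job_pos, &scheduler->todo_list, head) {
                        if (job->priority > job_pos->priority) {
                                list_add(&job->head, job_pos->head.prev);
                                first_match = true;
                                break;
                        }
                }

                /* No lower-priority entry found: append at the tail. */
                if (!first_match)
                        list_add_tail(&job->head, &scheduler->todo_list);
        }

        scheduler->job_count++;

        spin_unlock_irqrestore(&scheduler->irq_lock, flags);
}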
359 struct rga_scheduler_t *scheduler = NULL; in rga_job_schedule() local
362 scheduler = rga_drvdata->scheduler[i]; in rga_job_schedule()
363 rga_job_scheduler_timeout_clean(scheduler); in rga_job_schedule()
374 job->core = rga_drvdata->scheduler[0]->core; in rga_job_schedule()
375 job->scheduler = rga_drvdata->scheduler[0]; in rga_job_schedule()
378 scheduler = job->scheduler; in rga_job_schedule()
379 if (scheduler == NULL) { in rga_job_schedule()
380 pr_err("failed to get scheduler, %s(%d)\n", __func__, __LINE__); in rga_job_schedule()
385 return scheduler; in rga_job_schedule()
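rga_job_schedule() first sweeps every core to reap timed-out jobs, then binds the job to a core. The listed fallback pins the job to scheduler[0]; whatever multi-core selection happens in the unlisted lines between 363 and 374 is omitted here, and the num_of_scheduler loop bound is an assumed field name:

static struct rga_scheduler_t *rga_job_schedule(struct rga_job *job)
{
        int i;
        struct rga_scheduler_t *scheduler = NULL;

        /* Housekeeping sweep: reap any core whose running job timed out. */
        for (i = 0; i < rga_drvdata->num_of_scheduler; i++) {
                scheduler = rga_drvdata->scheduler[i];
                rga_job_scheduler_timeout_clean(scheduler);
        }

        /* Listed fallback: pin the job to the first core. Any smarter
         * core-selection logic lives in the unlisted lines. */
        job->core = rga_drvdata->scheduler[0]->core;
        job->scheduler = rga_drvdata->scheduler[0];

        scheduler = job->scheduler;
        if (scheduler == NULL) {
                pr_err("failed to get scheduler, %s(%d)\n", __func__, __LINE__);
                return NULL;
        }

        return scheduler;
}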
392 struct rga_scheduler_t *scheduler = NULL; in rga_job_commit() local
405 scheduler = rga_job_schedule(job); in rga_job_commit()
406 if (scheduler == NULL) { in rga_job_commit()
407 pr_err("failed to get scheduler, %s(%d)\n", __func__, __LINE__); in rga_job_commit()
412 if (rga_power_enable(scheduler) < 0) { in rga_job_commit()
425 ret = scheduler->ops->init_reg(job); in rga_job_commit()
434 rga_job_next(scheduler); in rga_job_commit()
436 rga_power_disable(scheduler); in rga_job_commit()
444 rga_power_disable(scheduler); in rga_job_commit()
448 rga_request_release_signal(scheduler, job); in rga_job_commit()
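rga_job_commit() strings the pieces together: pick a core, hold a power reference while init_reg() translates the request into register values, queue the job, and kick the dispatcher. The condensed model below uses a hypothetical name and signature (rga_job_commit_flow(), not the driver's real one), omits the job-allocation and request plumbing of the unlisted lines, and assumes the error codes and the exact position of the queueing call:

static int rga_job_commit_flow(struct rga_job *job)
{
        int ret;
        struct rga_scheduler_t *scheduler;

        scheduler = rga_job_schedule(job);
        if (scheduler == NULL) {
                pr_err("failed to get scheduler, %s(%d)\n", __func__, __LINE__);
                return -EINVAL;         /* assumed error code */
        }

        /* Hold a power reference across register setup and queueing. */
        if (rga_power_enable(scheduler) < 0)
                return -EBUSY;          /* assumed error code */

        /* Translate the request into register values for this core. */
        ret = scheduler->ops->init_reg(job);
        if (ret < 0)
                goto err_power_off;

        /* Assumed position: queue the job, then kick the dispatcher so it
         * starts immediately if the core is idle. */
        rga_job_insert_todo_list(job);
        rga_job_next(scheduler);

        rga_power_disable(scheduler);
        return 0;

err_power_off:
        rga_power_disable(scheduler);
        rga_request_release_signal(scheduler, job);
        return ret;
}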
621 struct rga_scheduler_t *scheduler = NULL; in rga_request_scheduler_job_abort() local
626 scheduler = rga_drvdata->scheduler[i]; in rga_request_scheduler_job_abort()
627 spin_lock_irqsave(&scheduler->irq_lock, flags); in rga_request_scheduler_job_abort()
629 list_for_each_entry_safe(job, job_q, &scheduler->todo_list, head) { in rga_request_scheduler_job_abort()
632 scheduler->job_count--; in rga_request_scheduler_job_abort()
639 if (scheduler->running_job) { in rga_request_scheduler_job_abort()
640 if (request->id == scheduler->running_job->request_id) { in rga_request_scheduler_job_abort()
641 job = scheduler->running_job; in rga_request_scheduler_job_abort()
642 scheduler_status = scheduler->status; in rga_request_scheduler_job_abort()
643 scheduler->running_job = NULL; in rga_request_scheduler_job_abort()
644 scheduler->status = RGA_SCHEDULER_ABORT; in rga_request_scheduler_job_abort()
648 scheduler->timer.busy_time += in rga_request_scheduler_job_abort()
650 scheduler->ops->soft_reset(scheduler); in rga_request_scheduler_job_abort()
654 scheduler->core, request->id); in rga_request_scheduler_job_abort()
659 spin_unlock_irqrestore(&scheduler->irq_lock, flags); in rga_request_scheduler_job_abort()
662 rga_power_disable(scheduler); in rga_request_scheduler_job_abort()
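The abort path walks every core under its irq_lock, drops queued jobs belonging to the request, and, if the running job matches, detaches it, marks the core RGA_SCHEDULER_ABORT, charges its busy time, and soft-resets the hardware. The listing also captures scheduler->status into a local scheduler_status before overwriting it (source line 642), presumably to decide in the unlisted lines whether a power reference is still held; the sketch folds that decision into a plain if (job), and the list_del_init() for queued jobs is assumed:

static void rga_request_scheduler_job_abort(struct rga_request *request)
{
        int i;
        unsigned long flags;
        struct rga_job *job, *job_q;
        struct rga_scheduler_t *scheduler;

        for (i = 0; i < rga_drvdata->num_of_scheduler; i++) {
                scheduler = rga_drvdata->scheduler[i];
                spin_lock_irqsave(&scheduler->irq_lock, flags);

                /* Drop every queued job that belongs to this request. */
                list_for_each_entry_safe(job, job_q, &scheduler->todo_list, head) {
                        if (request->id == job->request_id) {
                                list_del_init(&job->head);      /* assumed */
                                scheduler->job_count--;
                        }
                }

                job = NULL;
                if (scheduler->running_job &&
                    request->id == scheduler->running_job->request_id) {
                        /* Detach the running job, abort and reset the core,
                         * and charge the time it spent on the hardware. */
                        job = scheduler->running_job;
                        scheduler->running_job = NULL;
                        scheduler->status = RGA_SCHEDULER_ABORT;
                        scheduler->timer.busy_time +=
                                ktime_us_delta(ktime_get(), job->hw_recoder_time);
                        scheduler->ops->soft_reset(scheduler);

                        pr_err("reset core[0x%x] by request[%d] abort\n",
                               scheduler->core, request->id);
                }

                spin_unlock_irqrestore(&scheduler->irq_lock, flags);

                /* Assumed: the aborted running job still held the power
                 * reference taken in rga_job_run(); drop it now. */
                if (job)
                        rga_power_disable(scheduler);
        }
}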
743 struct rga_scheduler_t *scheduler = NULL; in rga_request_timeout_query_state() local
747 scheduler = rga_drvdata->scheduler[i]; in rga_request_timeout_query_state()
749 spin_lock_irqsave(&scheduler->irq_lock, flags); in rga_request_timeout_query_state()
751 if (scheduler->running_job) { in rga_request_timeout_query_state()
752 job = scheduler->running_job; in rga_request_timeout_query_state()
756 spin_unlock_irqrestore(&scheduler->irq_lock, flags); in rga_request_timeout_query_state()
760 spin_unlock_irqrestore(&scheduler->irq_lock, flags); in rga_request_timeout_query_state()
766 spin_unlock_irqrestore(&scheduler->irq_lock, flags); in rga_request_timeout_query_state()
773 spin_unlock_irqrestore(&scheduler->irq_lock, flags); in rga_request_timeout_query_state()
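rga_request_timeout_query_state() shows four separate unlock sites (source lines 756, 760, 766, 773), so the real function classifies the running job into more branches than the sketch below, which keeps only a single early-return case. The request-id check, the timeout comparison, and the RGA_REQUEST_STATE_ABORT return value are all hypothetical:

static int rga_request_timeout_query_state(struct rga_request *request)
{
        int i;
        unsigned long flags;
        struct rga_job *job;
        struct rga_scheduler_t *scheduler;

        for (i = 0; i < rga_drvdata->num_of_scheduler; i++) {
                scheduler = rga_drvdata->scheduler[i];

                spin_lock_irqsave(&scheduler->irq_lock, flags);

                if (scheduler->running_job) {
                        job = scheduler->running_job;

                        /* Hypothetical branch: the request's own job has been
                         * on the hardware past the deadline. */
                        if (request->id == job->request_id &&
                            ktime_ms_delta(ktime_get(), job->hw_running_time) >=
                            RGA_JOB_TIMEOUT_MS) {
                                spin_unlock_irqrestore(&scheduler->irq_lock, flags);
                                return RGA_REQUEST_STATE_ABORT;
                        }
                }

                spin_unlock_irqrestore(&scheduler->irq_lock, flags);
        }

        return 0;
}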
849 int rga_request_release_signal(struct rga_scheduler_t *scheduler, struct rga_job *job) in rga_request_release_signal() argument