Lines matching full:job (full-text search hits from the RVE driver's job scheduling code)

18 struct rve_job *job; in rve_scheduler_get_pending_job_list() local
22 job = list_first_entry_or_null(&scheduler->todo_list, in rve_scheduler_get_pending_job_list()
27 return job; in rve_scheduler_get_pending_job_list()
34 struct rve_job *job; in rve_scheduler_get_running_job() local
38 job = scheduler->running_job; in rve_scheduler_get_running_job()
42 return job; in rve_scheduler_get_running_job()
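
Editor's note: the hits above skip the lines between the local declaration and the read, which in this pattern are almost certainly a spinlock acquire/release around the queue access. A minimal sketch of the likely shape, assuming the lock is named irq_lock (the field name is not visible in these hits):

static struct rve_job *
rve_scheduler_get_pending_job_list(struct rve_scheduler_t *scheduler)
{
	struct rve_job *job;
	unsigned long flags;

	spin_lock_irqsave(&scheduler->irq_lock, flags);

	/* NULL when the todo list is empty */
	job = list_first_entry_or_null(&scheduler->todo_list,
				       struct rve_job, head);

	spin_unlock_irqrestore(&scheduler->irq_lock, flags);

	return job;
}

rve_scheduler_get_running_job() presumably follows the same pattern, reading scheduler->running_job in place of the list head.
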
45 static void rve_scheduler_set_pid_info(struct rve_job *job, ktime_t now) in rve_scheduler_set_pid_info() argument
52 scheduler = rve_job_get_scheduler(job); in rve_scheduler_set_pid_info()
56 scheduler->session.pid_info[i].pid = job->pid; in rve_scheduler_set_pid_info()
58 if (scheduler->session.pid_info[i].pid == job->pid) { in rve_scheduler_set_pid_info()
61 (job->hw_running_time - now); in rve_scheduler_set_pid_info()
77 scheduler->session.pid_info[pid_mark].pid = job->pid; in rve_scheduler_set_pid_info()
79 ktime_us_delta(now, job->hw_running_time); in rve_scheduler_set_pid_info()
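
rve_scheduler_set_pid_info() keeps per-process hardware-time statistics in scheduler->session.pid_info[]. The hits show a slot scan keyed on job->pid and a fallback write to index pid_mark. A hedged reconstruction; RVE_MAX_PID_INFO, the hw_time_total field, and the slot-reclaim policy are assumptions:

static void rve_scheduler_set_pid_info(struct rve_job *job, ktime_t now)
{
	struct rve_scheduler_t *scheduler = rve_job_get_scheduler(job);
	int i, pid_mark = 0;

	for (i = 0; i < RVE_MAX_PID_INFO; i++) {
		/* reuse the slot already owned by this pid */
		if (scheduler->session.pid_info[i].pid == job->pid) {
			scheduler->session.pid_info[i].hw_time_total +=
				ktime_us_delta(now, job->hw_running_time);
			return;
		}
	}

	/* no slot matched: claim one (slot-selection policy elided) */
	scheduler->session.pid_info[pid_mark].pid = job->pid;
	scheduler->session.pid_info[pid_mark].hw_time_total =
		ktime_us_delta(now, job->hw_running_time);
}
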
83 struct rve_scheduler_t *rve_job_get_scheduler(struct rve_job *job) in rve_job_get_scheduler() argument
85 return job->scheduler; in rve_job_get_scheduler()
88 struct rve_internal_ctx_t *rve_job_get_internal_ctx(struct rve_job *job) in rve_job_get_internal_ctx() argument
90 return job->ctx; in rve_job_get_internal_ctx()
93 static void rve_job_free(struct rve_job *job) in rve_job_free() argument
96 if (job->out_fence) in rve_job_free()
97 dma_fence_put(job->out_fence); in rve_job_free()
100 free_page((unsigned long)job); in rve_job_free()
103 static int rve_job_cleanup(struct rve_job *job) in rve_job_cleanup() argument
108 pr_info("(pid:%d) job clean use time = %lld\n", job->pid, in rve_job_cleanup()
109 ktime_us_delta(now, job->timestamp)); in rve_job_cleanup()
111 rve_job_free(job); in rve_job_cleanup()
147 struct rve_job *job = NULL; in rve_job_alloc() local
149 job = (struct rve_job *)get_zeroed_page(GFP_KERNEL | GFP_DMA32); in rve_job_alloc()
150 if (!job) in rve_job_alloc()
154 spin_lock_init(&job->fence_lock); in rve_job_alloc()
156 INIT_LIST_HEAD(&job->head); in rve_job_alloc()
158 job->timestamp = ktime_get(); in rve_job_alloc()
159 job->pid = current->pid; in rve_job_alloc()
160 job->regcmd_data = &ctx->regcmd_data[ctx->running_job_count]; in rve_job_alloc()
162 job->scheduler = rve_drvdata->scheduler[0]; in rve_job_alloc()
163 job->core = rve_drvdata->scheduler[0]->core; in rve_job_alloc()
164 job->ctx = ctx; in rve_job_alloc()
165 ctx->scheduler = job->scheduler; in rve_job_alloc()
166 job->session = ctx->session; in rve_job_alloc()
170 job->priority = RVE_SCHED_PRIORITY_MAX; in rve_job_alloc()
172 job->priority = ctx->priority; in rve_job_alloc()
175 return job; in rve_job_alloc()
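
Each job is carved from one zeroed page (note GFP_DMA32, keeping the backing memory below 4 GiB) and is released with free_page() in rve_job_free() above, so struct rve_job must fit within PAGE_SIZE. A condensed sketch of the allocator; the priority clamp between file lines 170 and 172 is reconstructed, since the condition itself is elided:

static struct rve_job *rve_job_alloc(struct rve_internal_ctx_t *ctx)
{
	struct rve_job *job;

	/* one zeroed page per job, freed with free_page(), not kfree() */
	job = (struct rve_job *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
	if (!job)
		return NULL;

	spin_lock_init(&job->fence_lock);
	INIT_LIST_HEAD(&job->head);

	job->timestamp = ktime_get();
	job->pid = current->pid;
	job->regcmd_data = &ctx->regcmd_data[ctx->running_job_count];

	/* single-core device: everything lands on scheduler[0] */
	job->scheduler = rve_drvdata->scheduler[0];
	job->core = rve_drvdata->scheduler[0]->core;
	job->ctx = ctx;
	ctx->scheduler = job->scheduler;
	job->session = ctx->session;

	/* clamp out-of-range priorities (reconstructed condition) */
	if (ctx->priority > RVE_SCHED_PRIORITY_MAX)
		job->priority = RVE_SCHED_PRIORITY_MAX;
	else
		job->priority = ctx->priority;

	return job;
}
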
178 static void rve_job_dump_info(struct rve_job *job) in rve_job_dump_info() argument
180 pr_info("job: priority = %d, core = %d\n", in rve_job_dump_info()
181 job->priority, job->core); in rve_job_dump_info()
184 static int rve_job_run(struct rve_job *job) in rve_job_run() argument
189 scheduler = rve_job_get_scheduler(job); in rve_job_run()
200 ret = scheduler->ops->init_reg(job); in rve_job_run()
206 ret = scheduler->ops->set_reg(job, scheduler); in rve_job_run()
214 rve_job_dump_info(job); in rve_job_run()
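
rve_job_run() drives the hardware through the scheduler's ops vtable in two phases: init_reg() builds the register command buffer and set_reg() programs it into the core. A sketch of the likely shape; the error-handling bodies are assumptions:

static int rve_job_run(struct rve_job *job)
{
	struct rve_scheduler_t *scheduler = rve_job_get_scheduler(job);
	int ret;

	/* build the register command buffer for this job */
	ret = scheduler->ops->init_reg(job);
	if (ret < 0) {
		pr_err("init_reg failed, ret = %d\n", ret);
		return ret;
	}

	/* program the hardware and start it */
	ret = scheduler->ops->set_reg(job, scheduler);
	if (ret < 0) {
		pr_err("set_reg failed, ret = %d\n", ret);
		return ret;
	}

	rve_job_dump_info(job);	/* likely gated by a debug switch */

	return ret;
}
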
228 struct rve_job *job = NULL; in rve_job_next() local
240 job = list_first_entry(&scheduler->todo_list, struct rve_job, head); in rve_job_next()
242 list_del_init(&job->head); in rve_job_next()
246 scheduler->running_job = job; in rve_job_next()
250 job->ret = rve_job_run(job); in rve_job_next()
253 if (job->ret < 0) { in rve_job_next()
263 rve_internal_ctx_signal(job); in rve_job_next()
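
rve_job_next() is the dispatcher: it pops the head of todo_list under the scheduler lock, publishes it as running_job, and starts it. A sketch, again assuming the lock is named irq_lock and that a busy core or an empty queue returns early:

static void rve_job_next(struct rve_scheduler_t *scheduler)
{
	struct rve_job *job;
	unsigned long flags;

	spin_lock_irqsave(&scheduler->irq_lock, flags);

	if (scheduler->running_job || list_empty(&scheduler->todo_list)) {
		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
		return;
	}

	job = list_first_entry(&scheduler->todo_list, struct rve_job, head);
	list_del_init(&job->head);
	scheduler->running_job = job;

	spin_unlock_irqrestore(&scheduler->irq_lock, flags);

	job->ret = rve_job_run(job);
	if (job->ret < 0)
		rve_internal_ctx_signal(job);	/* report failure to the ctx;
						 * clearing running_job is
						 * elided in the hits */
}
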
269 static void rve_job_finish_and_next(struct rve_job *job, int ret) in rve_job_finish_and_next() argument
274 job->ret = ret; in rve_job_finish_and_next()
276 scheduler = rve_job_get_scheduler(job); in rve_job_finish_and_next()
279 pr_info("hw use time = %lld\n", ktime_us_delta(now, job->hw_running_time)); in rve_job_finish_and_next()
280 pr_info("(pid:%d) job done use time = %lld\n", job->pid, in rve_job_finish_and_next()
281 ktime_us_delta(now, job->timestamp)); in rve_job_finish_and_next()
284 rve_internal_ctx_signal(job); in rve_job_finish_and_next()
295 struct rve_job *job; in rve_job_done() local
305 job = scheduler->running_job; in rve_job_done()
308 scheduler->timer.busy_time += ktime_us_delta(now, job->hw_recoder_time); in rve_job_done()
310 rve_scheduler_set_pid_info(job, now); in rve_job_done()
314 spin_lock_irqsave(&job->ctx->lock, flags); in rve_job_done()
316 job->ctx->debug_info.max_cost_time_per_sec = in rve_job_done()
317 max(job->ctx->debug_info.last_job_hw_use_time, in rve_job_done()
318 job->ctx->debug_info.max_cost_time_per_sec); in rve_job_done()
319 job->ctx->debug_info.last_job_hw_use_time = ktime_us_delta(now, job->hw_running_time); in rve_job_done()
320 job->ctx->debug_info.hw_time_total += job->ctx->debug_info.last_job_hw_use_time; in rve_job_done()
321 job->ctx->debug_info.last_job_use_time = ktime_us_delta(now, job->timestamp); in rve_job_done()
323 spin_unlock_irqrestore(&job->ctx->lock, flags); in rve_job_done()
326 cmd_reg = job->regcmd_data->cmd_reg; in rve_job_done()
332 rve_get_monitor_info(job); in rve_job_done()
340 rve_job_finish_and_next(job, ret); in rve_job_done()
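
The hits at file lines 314 to 323 reassemble into the interrupt-side bookkeeping below, all done under ctx->lock so a concurrent reader sees a consistent snapshot. Note the update order: the running maximum is folded in before last_job_hw_use_time is refreshed, so max_cost_time_per_sec trails the current job by one sample:

	spin_lock_irqsave(&job->ctx->lock, flags);

	/* fold the previous sample into the running per-second maximum... */
	job->ctx->debug_info.max_cost_time_per_sec =
		max(job->ctx->debug_info.last_job_hw_use_time,
		    job->ctx->debug_info.max_cost_time_per_sec);

	/* ...then record this job's hardware and end-to-end latency */
	job->ctx->debug_info.last_job_hw_use_time =
		ktime_us_delta(now, job->hw_running_time);
	job->ctx->debug_info.hw_time_total +=
		job->ctx->debug_info.last_job_hw_use_time;
	job->ctx->debug_info.last_job_use_time =
		ktime_us_delta(now, job->timestamp);

	spin_unlock_irqrestore(&job->ctx->lock, flags);
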
346 struct rve_job *job = NULL; in rve_job_timeout_clean() local
351 job = scheduler->running_job; in rve_job_timeout_clean()
352 if (job && (job->flags & RVE_ASYNC) && in rve_job_timeout_clean()
353 (ktime_to_ms(ktime_sub(now, job->hw_running_time)) >= RVE_ASYNC_TIMEOUT_DELAY)) { in rve_job_timeout_clean()
360 rve_internal_ctx_signal(job); in rve_job_timeout_clean()
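
rve_job_timeout_clean() sweeps the running slot for asynchronous jobs that have been on the hardware longer than RVE_ASYNC_TIMEOUT_DELAY milliseconds and force-signals them. The check, reassembled from the hits; whatever reset of the stuck core happens on the elided lines is omitted:

	ktime_t now = ktime_get();
	struct rve_job *job = scheduler->running_job;

	if (job && (job->flags & RVE_ASYNC) &&
	    ktime_to_ms(ktime_sub(now, job->hw_running_time)) >=
		    RVE_ASYNC_TIMEOUT_DELAY) {
		/* stop/reset of the stuck core elided in the hits */
		rve_internal_ctx_signal(job);
	}
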
370 static struct rve_scheduler_t *rve_job_schedule(struct rve_job *job) in rve_job_schedule() argument
377 scheduler = rve_job_get_scheduler(job); in rve_job_schedule()
390 || (job->priority == RVE_SCHED_PRIORITY_DEFAULT)) { in rve_job_schedule()
391 list_add_tail(&job->head, &scheduler->todo_list); in rve_job_schedule()
394 if (job->priority > job_pos->priority && in rve_job_schedule()
396 list_add(&job->head, &job_pos->head); in rve_job_schedule()
409 list_add_tail(&job->head, &scheduler->todo_list); in rve_job_schedule()
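
The enqueue at file lines 390 to 409 implements a priority queue on todo_list: default-priority jobs are appended FIFO, higher-priority jobs are inserted ahead of lower-priority ones, and anything that finds no insertion point falls through to the tail. The second half of the comparison on the elided line 395 is not visible, so the exact insertion point is uncertain; below is a self-consistent reconstruction, factored into a hypothetical helper and assuming the caller already holds the scheduler lock:

static void rve_job_insert_todo(struct rve_scheduler_t *scheduler,
				struct rve_job *job)
{
	struct rve_job *job_pos;

	/* default priority (or empty queue): plain FIFO at the tail */
	if (list_empty(&scheduler->todo_list) ||
	    job->priority == RVE_SCHED_PRIORITY_DEFAULT) {
		list_add_tail(&job->head, &scheduler->todo_list);
		return;
	}

	list_for_each_entry(job_pos, &scheduler->todo_list, head) {
		if (job->priority > job_pos->priority) {
			/* insert before the first lower-priority job */
			list_add_tail(&job->head, &job_pos->head);
			return;
		}
	}

	/* lowest priority so far: append */
	list_add_tail(&job->head, &scheduler->todo_list);
}
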
421 static void rve_job_abort_running(struct rve_job *job) in rve_job_abort_running() argument
426 scheduler = rve_job_get_scheduler(job); in rve_job_abort_running()
430 /* invalid job */ in rve_job_abort_running()
431 if (job == scheduler->running_job) in rve_job_abort_running()
436 rve_job_cleanup(job); in rve_job_abort_running()
439 static void rve_job_abort_invalid(struct rve_job *job) in rve_job_abort_invalid() argument
441 rve_job_cleanup(job); in rve_job_abort_invalid()
444 static inline int rve_job_wait(struct rve_job *job) in rve_job_wait() argument
452 scheduler = rve_job_get_scheduler(job); in rve_job_wait()
455 job->ctx->finished_job_count == job->ctx->cmd_num, in rve_job_wait()
456 RVE_SYNC_TIMEOUT_DELAY * job->ctx->cmd_num); in rve_job_wait()
476 ktime_to_us(ktime_sub(now, job->hw_running_time))); in rve_job_wait()
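
rve_job_wait() blocks an RVE_SYNC submitter until every command in the context has finished, scaling the timeout linearly with cmd_num. A sketch; the wait-queue name job_done_wq and the -EBUSY return are assumptions:

static inline int rve_job_wait(struct rve_job *job)
{
	struct rve_scheduler_t *scheduler = rve_job_get_scheduler(job);
	ktime_t now;
	long left_time;

	left_time = wait_event_timeout(scheduler->job_done_wq,
			job->ctx->finished_job_count == job->ctx->cmd_num,
			RVE_SYNC_TIMEOUT_DELAY * job->ctx->cmd_num);
	if (left_time == 0) {
		now = ktime_get();
		pr_err("job wait timeout, hw running time = %lld us\n",
		       ktime_to_us(ktime_sub(now, job->hw_running_time)));
		return -EBUSY;	/* errno choice is an assumption */
	}

	return job->ret;	/* propagating the job's status is assumed */
}
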
493 pr_err("rve job wait in_fence signal use time = %lld\n", in rve_job_input_fence_signaled()
494 ktime_to_us(ktime_sub(now, waiter->job->timestamp))); in rve_job_input_fence_signaled()
496 scheduler = rve_job_schedule(waiter->job); in rve_job_input_fence_signaled()
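
When a job carries an input fence, scheduling is deferred to this dma_fence callback: once the producer signals, the waiter's job is finally queued. A sketch of the callback; the rve_fence_waiter layout is inferred from waiter->job in the hits, and its field names are assumptions:

struct rve_fence_waiter {
	struct dma_fence_cb waiter;	/* hypothetical field names */
	struct rve_job *job;
};

static void rve_job_input_fence_signaled(struct dma_fence *fence,
					 struct dma_fence_cb *waiter_cb)
{
	struct rve_fence_waiter *waiter =
		container_of(waiter_cb, struct rve_fence_waiter, waiter);
	struct rve_scheduler_t *scheduler;

	/* the producer has signaled: the job may run now */
	scheduler = rve_job_schedule(waiter->job);
	if (!scheduler)
		pr_err("failed to get scheduler, %s(%d)\n",
		       __func__, __LINE__);

	kfree(waiter);	/* freeing the waiter here is an assumption */
}
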
647 struct rve_job *job = NULL; in rve_job_commit() local
654 job = rve_job_alloc(ctx); in rve_job_commit()
655 if (!job) { in rve_job_commit()
656 pr_err("failed to alloc rve job!\n"); in rve_job_commit()
662 job->flags |= RVE_ASYNC; in rve_job_commit()
665 ret = rve_out_fence_alloc(job); in rve_job_commit()
667 rve_job_free(job); in rve_job_commit()
672 ctx->out_fence = job->out_fence; in rve_job_commit()
674 ctx->out_fence_fd = rve_out_fence_get_fd(job); in rve_job_commit()
689 rve_job_free(job); in rve_job_commit()
699 scheduler = rve_job_schedule(job); in rve_job_commit()
708 ret = rve_add_dma_fence_callback(job, in rve_job_commit()
713 rve_job_free(job); in rve_job_commit()
718 rve_job_free(job); in rve_job_commit()
722 scheduler = rve_job_schedule(job); in rve_job_commit()
737 /* RVE_SYNC: wait until job finish */ in rve_job_commit()
739 scheduler = rve_job_schedule(job); in rve_job_commit()
747 ret = job->ret; in rve_job_commit()
749 pr_err("some error on job, %s(%d)\n", __func__, in rve_job_commit()
754 ret = rve_job_wait(job); in rve_job_commit()
758 rve_job_cleanup(job); in rve_job_commit()
763 rve_job_abort_invalid(job); in rve_job_commit()
768 rve_job_abort_running(job); in rve_job_commit()
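
rve_job_commit() is the submission entry point, with two paths visible in the hits. RVE_ASYNC allocates an out-fence, hands its fd to userspace, and either schedules immediately or parks the job on the in-fence callback above; RVE_SYNC schedules and then blocks in rve_job_wait(). A condensed, hedged reconstruction (the in_fence lookup, the argument list of rve_add_dma_fence_callback() beyond job, and several error labels are elided in the listing, and the errno choices are assumptions):

static int rve_job_commit_sketch(struct rve_internal_ctx_t *ctx,
				 struct dma_fence *in_fence, bool async)
{
	struct rve_scheduler_t *scheduler;
	struct rve_job *job;
	int ret;

	job = rve_job_alloc(ctx);
	if (!job) {
		pr_err("failed to alloc rve job!\n");
		return -ENOMEM;
	}

	if (async) {
		job->flags |= RVE_ASYNC;

		/* userspace polls this fence fd for completion */
		ret = rve_out_fence_alloc(job);
		if (ret < 0)
			goto err_free_job;

		ctx->out_fence = job->out_fence;
		ctx->out_fence_fd = rve_out_fence_get_fd(job);

		if (in_fence) {
			/* run only after the producer signals; the real
			 * argument list beyond job is elided in the hits */
			ret = rve_add_dma_fence_callback(job, in_fence);
			if (ret < 0)
				goto err_free_job;
			return 0;
		}

		scheduler = rve_job_schedule(job);
		if (!scheduler)
			goto err_invalid;
		return 0;
	}

	/* RVE_SYNC: queue, then wait until the whole ctx finishes */
	scheduler = rve_job_schedule(job);
	if (!scheduler)
		goto err_invalid;

	ret = job->ret;
	if (ret < 0) {
		pr_err("some error on job, %s(%d)\n", __func__, __LINE__);
		goto err_abort;
	}

	ret = rve_job_wait(job);
	if (ret < 0)
		goto err_abort;

	rve_job_cleanup(job);
	return 0;

err_invalid:
	rve_job_abort_invalid(job);
	return -EINVAL;

err_abort:
	rve_job_abort_running(job);
	return ret;

err_free_job:
	rve_job_free(job);
	return ret;
}
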
822 int rve_internal_ctx_signal(struct rve_job *job) in rve_internal_ctx_signal() argument
829 scheduler = rve_job_get_scheduler(job); in rve_internal_ctx_signal()
835 ctx = rve_job_get_internal_ctx(job); in rve_internal_ctx_signal()
841 ctx->regcmd_data = job->regcmd_data; in rve_internal_ctx_signal()
855 job->flags |= RVE_JOB_DONE; in rve_internal_ctx_signal()
866 if (job->flags & RVE_ASYNC) { in rve_internal_ctx_signal()
867 rve_job_cleanup(job); in rve_internal_ctx_signal()
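
rve_internal_ctx_signal() is the completion fan-out: it copies the finished register command block back into the context, marks the job done, and, since nothing will ever wait on an asynchronous job, frees it on the spot (synchronous jobs are cleaned up by the committer after rve_job_wait()). The tail of the function, with the steps between the hits assumed:

	ctx->regcmd_data = job->regcmd_data;
	job->flags |= RVE_JOB_DONE;

	/* elided in the hits: bump ctx->finished_job_count, signal the
	 * out-fence, wake rve_job_wait() sleepers */

	if (job->flags & RVE_ASYNC)
		rve_job_cleanup(job);	/* no waiter will free it */
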
937 struct rve_job *job_pos, *job_q, *job; in rve_internal_ctx_kref_release() local
959 job = job_pos; in rve_internal_ctx_kref_release()
968 job = scheduler->running_job; in rve_internal_ctx_kref_release()
970 if (job->ctx->id == ctx->id) { in rve_internal_ctx_kref_release()
972 scheduler->timer.busy_time += ktime_us_delta(now, job->hw_recoder_time); in rve_internal_ctx_kref_release()
983 rve_job_finish_and_next(job, 0); in rve_internal_ctx_kref_release()
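
rve_internal_ctx_kref_release() tears a context down against the scheduler queues: pending jobs belonging to the context are unlinked and freed, and a still-running job is charged to the busy-time counter (hw_recoder_time is the field's spelling in the driver) and completed with ret = 0. The job_pos/job_q pair among the locals strongly suggests list_for_each_entry_safe(); a reconstruction of the walk:

	list_for_each_entry_safe(job_pos, job_q,
				 &scheduler->todo_list, head) {
		if (job_pos->ctx->id == ctx->id) {
			job = job_pos;
			list_del_init(&job->head);
			rve_job_cleanup(job);
		}
	}

	job = scheduler->running_job;
	if (job && job->ctx->id == ctx->id) {
		/* account the time the doomed job spent on the core */
		scheduler->timer.busy_time +=
			ktime_us_delta(now, job->hw_recoder_time);
		rve_job_finish_and_next(job, 0);
	}
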