Lines matching full:job — full-word occurrences of the identifier job, grouped by enclosing function; the leading number on each line is its line number in the source file.

In rknn_get_task_number():
   61  static int rknn_get_task_number(struct rknpu_job *job, int core_index)
   63  int task_num = job->args->task_number;
   65  if (job->use_core_num == 2)
   66  task_num = job->args->subcore_task[core_index].task_number;
   67  else if (job->use_core_num == 3)
   68  task_num = job->args->subcore_task[core_index + 2].task_number;
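Only the matched lines of this helper appear above. A minimal reconstruction, assuming the elided lines are just the braces and a final return of task_num, looks like this:

        static int rknn_get_task_number(struct rknpu_job *job, int core_index)
        {
                int task_num = job->args->task_number;

                /* Multi-core jobs carry per-subcore task counts: a two-core job
                 * indexes subcore_task[0..1] directly, a three-core job uses the
                 * slots starting at offset 2 (hence core_index + 2 on line 68). */
                if (job->use_core_num == 2)
                        task_num = job->args->subcore_task[core_index].task_number;
                else if (job->use_core_num == 3)
                        task_num = job->args->subcore_task[core_index + 2].task_number;

                return task_num;        /* assumed return; not among the matched lines */
        }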

In rknpu_job_free():
   73  static void rknpu_job_free(struct rknpu_job *job)
   79  (struct rknpu_gem_object *)(uintptr_t)job->args->task_obj_addr;
   84  if (job->fence)
   85  dma_fence_put(job->fence);
   87  if (job->args_owner)
   88  kfree(job->args);
   90  kfree(job);

In rknpu_job_cleanup():
   93  static int rknpu_job_cleanup(struct rknpu_job *job)
   95  rknpu_job_free(job);

In rknpu_job_cleanup_work():
  102  struct rknpu_job *job =
  105  rknpu_job_cleanup(job);
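
Line 102 is cut off mid-assignment. Because the work item is registered with INIT_WORK(&job->cleanup_work, rknpu_job_cleanup_work) in rknpu_job_alloc() (line 149), the elided right-hand side is almost certainly the usual container_of() pattern; a minimal sketch under that assumption:

        static void rknpu_job_cleanup_work(struct work_struct *work)
        {
                /* Recover the job from its embedded work item (assumed). */
                struct rknpu_job *job =
                        container_of(work, struct rknpu_job, cleanup_work);

                rknpu_job_cleanup(job);
        }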

In rknpu_job_alloc():
  111  struct rknpu_job *job = NULL;
  118  job = kzalloc(sizeof(*job), GFP_KERNEL);
  119  if (!job)
  122  job->timestamp = ktime_get();
  123  job->rknpu_dev = rknpu_dev;
  124  job->use_core_num = (args->core_mask & RKNPU_CORE0_MASK) +
  127  atomic_set(&job->run_count, job->use_core_num);
  128  atomic_set(&job->interrupt_count, job->use_core_num);
  136  job->args = args;
  137  job->args_owner = false;
  138  return job;
  141  job->args = kzalloc(sizeof(*args), GFP_KERNEL);
  142  if (!job->args) {
  143  kfree(job);
  146  *job->args = *args;
  147  job->args_owner = true;
  149  INIT_WORK(&job->cleanup_work, rknpu_job_cleanup_work);
  151  return job;
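
Two things stand out in this group: use_core_num (line 124, continuation elided) evidently counts the cores selected by core_mask and seeds both run_count and interrupt_count, which later gate the commit and completion paths; and the job either borrows the caller's args (early return at line 138, args_owner = false) or takes a private copy and registers the deferred cleanup worker (lines 141-149). A condensed sketch of the ownership split, with the selecting condition left as a hypothetical placeholder because it is not among the matched lines:

        if (caller_waits_synchronously) {        /* hypothetical condition */
                job->args = args;                /* borrowed: rknpu_job_free() must not kfree() it */
                job->args_owner = false;
                return job;
        }

        job->args = kzalloc(sizeof(*args), GFP_KERNEL);
        if (!job->args) {
                kfree(job);
                return NULL;                     /* assumed error return */
        }
        *job->args = *args;                      /* private copy, freed in rknpu_job_free() */
        job->args_owner = true;

        INIT_WORK(&job->cleanup_work, rknpu_job_cleanup_work);

        return job;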

In rknpu_job_wait():
  154  static inline int rknpu_job_wait(struct rknpu_job *job)
  156  struct rknpu_device *rknpu_dev = job->rknpu_dev;
  157  struct rknpu_submit *args = job->args;
  161  int core_index = rknpu_core_index(job->args->core_mask);
  171  job->flags & RKNPU_JOB_DONE ||
  176  } while (ret == 0 && job->in_queue[core_index]);
  178  if (job->in_queue[core_index]) {
  180  subcore_data->task_num -= rknn_get_task_number(job, core_index);
  181  if (job->use_core_num == 1) {
  182  list_del_init(&job->head[core_index]);
  183  job->in_queue[core_index] = false;
  184  } else if (job->use_core_num > 1) {
  185  for (i = 0; i < job->use_core_num; i++) {
  186  if (job->in_queue[i]) {
  187  list_del_init(&job->head[i]);
  188  job->in_queue[i] = false;
  196  last_task = job->last_task;
  200  last_task->int_status = job->int_status[core_index];
  214  "failed to wait job, task counter: %d, flags: %#x, ret = %d, elapsed time: %lldus\n",
  216  ktime_to_us(ktime_sub(ktime_get(), job->timestamp)));
  221  if (!(job->flags & RKNPU_JOB_DONE))
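
Lines 171-176 are the tail of a retry loop (the fragment at line 171 suggests a wait on RKNPU_JOB_DONE with a timeout), and lines 178-188 handle the job still being queued when the wait gives up: its task count is subtracted and it is unlinked from every subcore todo list it still occupies. A minimal sketch of that cleanup, with the guarding lock named only as an assumption:

        spin_lock_irqsave(&rknpu_dev->irq_lock, flags);          /* lock name assumed */
        if (job->in_queue[core_index]) {
                subcore_data->task_num -= rknn_get_task_number(job, core_index);
                if (job->use_core_num == 1) {
                        list_del_init(&job->head[core_index]);
                        job->in_queue[core_index] = false;
                } else if (job->use_core_num > 1) {
                        /* Multi-core job: drop it from every queue it is still on. */
                        for (i = 0; i < job->use_core_num; i++) {
                                if (job->in_queue[i]) {
                                        list_del_init(&job->head[i]);
                                        job->in_queue[i] = false;
                                }
                        }
                }
        }
        spin_unlock_irqrestore(&rknpu_dev->irq_lock, flags);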

In rknpu_job_commit_pc():
  229  static inline int rknpu_job_commit_pc(struct rknpu_job *job, int core_index)
  231  struct rknpu_device *rknpu_dev = job->rknpu_dev;
  232  struct rknpu_submit *args = job->args;
  264  if (job->use_core_num == 1) {
  271  } else if (job->use_core_num == 2) {
  278  } else if (job->use_core_num == 3) {
  312  job->first_task = first_task;
  313  job->last_task = last_task;
  314  job->int_mask[core_index] = last_task->int_mask;
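
The bodies of the three use_core_num branches (lines 264-278) are elided, but the indexing in rknn_get_task_number() suggests they select the per-core slice of the task list before the hardware is programmed; the last task's int_mask is then remembered (line 314) so the interrupt handler can recognize completion. A rough sketch of the slice selection only; the task_start field and the variable names are assumptions, and the register writes themselves are not reconstructed:

        int task_start = args->task_start;       /* field name assumed */
        int task_number = args->task_number;

        if (job->use_core_num == 2) {
                task_start = args->subcore_task[core_index].task_start;
                task_number = args->subcore_task[core_index].task_number;
        } else if (job->use_core_num == 3) {
                task_start = args->subcore_task[core_index + 2].task_start;
                task_number = args->subcore_task[core_index + 2].task_number;
        }

        /* first_task/last_task presumably bracket that slice; the expected
         * interrupt mask of the final task is recorded per core (line 314). */
        job->first_task = first_task;
        job->last_task = last_task;
        job->int_mask[core_index] = last_task->int_mask;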

In rknpu_job_commit():
  322  static int rknpu_job_commit(struct rknpu_job *job, int core_index)
  324  struct rknpu_device *rknpu_dev = job->rknpu_dev;
  325  struct rknpu_submit *args = job->args;
  334  return rknpu_job_commit_pc(job, core_index);

In rknpu_job_next():
  339  struct rknpu_job *job = NULL;
  350  if (subcore_data->job || list_empty(&subcore_data->todo_list)) {
  355  job = list_first_entry(&subcore_data->todo_list, struct rknpu_job,
  358  list_del_init(&job->head[core_index]);
  359  job->in_queue[core_index] = false;
  360  subcore_data->job = job;
  361  job->hw_recoder_time = ktime_get();
  364  if (atomic_dec_and_test(&job->run_count)) {
  365  if (job->args->core_mask & RKNPU_CORE0_MASK)
  366  job->ret = rknpu_job_commit(job, 0);
  367  if (job->args->core_mask & RKNPU_CORE1_MASK)
  368  job->ret = rknpu_job_commit(job, 1);
  369  if (job->args->core_mask & RKNPU_CORE2_MASK)
  370  job->ret = rknpu_job_commit(job, 2);
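
run_count acts as a rendezvous counter here: every core that pops the job off its todo list decrements it, and only the last one (atomic_dec_and_test() returning true at line 364) commits the job on all of the cores selected in core_mask. A stripped-down sketch of that pattern, with the locking omitted and the elided third argument of list_first_entry() filled in as an assumption:

        if (subcore_data->job || list_empty(&subcore_data->todo_list))
                return;                                          /* nothing to start on this core */

        job = list_first_entry(&subcore_data->todo_list, struct rknpu_job,
                               head[core_index]);                /* member assumed from line 358 */
        list_del_init(&job->head[core_index]);
        job->in_queue[core_index] = false;
        subcore_data->job = job;
        job->hw_recoder_time = ktime_get();

        /* Last core to claim the job starts the actual commit on every core. */
        if (atomic_dec_and_test(&job->run_count)) {
                if (job->args->core_mask & RKNPU_CORE0_MASK)
                        job->ret = rknpu_job_commit(job, 0);
                if (job->args->core_mask & RKNPU_CORE1_MASK)
                        job->ret = rknpu_job_commit(job, 1);
                if (job->args->core_mask & RKNPU_CORE2_MASK)
                        job->ret = rknpu_job_commit(job, 2);
        }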

In rknpu_job_done():
  374  static void rknpu_job_done(struct rknpu_job *job, int ret, int core_index)
  376  struct rknpu_device *rknpu_dev = job->rknpu_dev;
  384  subcore_data->job = NULL;
  385  subcore_data->task_num -= rknn_get_task_number(job, core_index);
  387  ktime_us_delta(now, job->hw_recoder_time);
  390  if (atomic_dec_and_test(&job->interrupt_count)) {
  391  int use_core_num = job->use_core_num;
  393  job->flags |= RKNPU_JOB_DONE;
  394  job->ret = ret;
  396  if (job->fence)
  397  dma_fence_signal(job->fence);
  399  if (job->flags & RKNPU_JOB_ASYNC)
  400  schedule_work(&job->cleanup_work);
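
interrupt_count mirrors run_count on the completion side: each core's interrupt decrements it, and only when the last core finishes is the job marked RKNPU_JOB_DONE, its fence signaled, and, for asynchronous submits, its deferred cleanup scheduled. A condensed sketch; the wake-up of a synchronous waiter is an assumption since it is not among the matched lines:

        subcore_data->job = NULL;
        subcore_data->task_num -= rknn_get_task_number(job, core_index);

        /* Last interrupt for this job: publish the result. */
        if (atomic_dec_and_test(&job->interrupt_count)) {
                job->flags |= RKNPU_JOB_DONE;
                job->ret = ret;

                if (job->fence)
                        dma_fence_signal(job->fence);

                if (job->flags & RKNPU_JOB_ASYNC)
                        schedule_work(&job->cleanup_work);
                /* A synchronous submitter presumably gets woken here as well,
                 * pairing with the wait loop in rknpu_job_wait() (not shown). */
        }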

In rknpu_job_schedule():
  411  static void rknpu_job_schedule(struct rknpu_job *job)
  413  struct rknpu_device *rknpu_dev = job->rknpu_dev;
  420  if ((job->args->core_mask & 0x07) == RKNPU_CORE_AUTO_MASK) {
  439  if (!rknpu_dev->subcore_datas[task_num_list[0]].job)
  441  else if (!rknpu_dev->subcore_datas[task_num_list[1]].job)
  443  else if (!rknpu_dev->subcore_datas[task_num_list[2]].job)
  448  job->args->core_mask = rknpu_core_mask(core_index);
  449  job->use_core_num = 1;
  450  atomic_set(&job->run_count, job->use_core_num);
  451  atomic_set(&job->interrupt_count, job->use_core_num);
  456  if (job->args->core_mask & rknpu_core_mask(i)) {
  458  list_add_tail(&job->head[i], &subcore_data->todo_list);
  459  subcore_data->task_num += rknn_get_task_number(job, i);
  460  job->in_queue[i] = true;
  466  if (job->args->core_mask & rknpu_core_mask(i))
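
For RKNPU_CORE_AUTO_MASK the scheduler rewrites the job into a single-core job: the elided lines around 420-443 apparently rank the cores by pending task count (task_num_list[]) and take the first idle one, after which core_mask, use_core_num and both counters are reset (lines 448-451). The job is then queued on every selected core and each of those cores is kicked (lines 456-466). A sketch of the enqueue half, with the lock, the loop bound, and the kick call treated as assumptions:

        spin_lock_irqsave(&rknpu_dev->irq_lock, flags);          /* lock name assumed */
        for (i = 0; i < RKNPU_MAX_CORES; i++) {                  /* bound name assumed */
                if (job->args->core_mask & rknpu_core_mask(i)) {
                        subcore_data = &rknpu_dev->subcore_datas[i];
                        list_add_tail(&job->head[i], &subcore_data->todo_list);
                        subcore_data->task_num += rknn_get_task_number(job, i);
                        job->in_queue[i] = true;
                }
        }
        spin_unlock_irqrestore(&rknpu_dev->irq_lock, flags);

        /* Kick every core the job was queued on so an idle core picks it up. */
        for (i = 0; i < RKNPU_MAX_CORES; i++)
                if (job->args->core_mask & rknpu_core_mask(i))
                        rknpu_job_next(rknpu_dev, i);            /* call and signature assumed */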

In rknpu_job_abort():
  471  static void rknpu_job_abort(struct rknpu_job *job)
  473  struct rknpu_device *rknpu_dev = job->rknpu_dev;
  481  if (job->args->core_mask & rknpu_core_mask(i)) {
  484  if (job == subcore_data->job && !job->irq_entry[i]) {
  485  subcore_data->job = NULL;
  487  rknn_get_task_number(job, i);
  493  if (job->ret == -ETIMEDOUT) {
  494  LOG_ERROR("job timeout, flags: %#x:\n", job->flags);
  496  if (job->args->core_mask & rknpu_core_mask(i)) {
  503  job->int_mask[i],
  509  job->timestamp)));
  515  "job abort, flags: %#x, ret: %d, elapsed time: %lldus\n",
  516  job->flags, job->ret,
  517  ktime_to_us(ktime_sub(ktime_get(), job->timestamp)));
  520  rknpu_job_cleanup(job);

In rknpu_irq_handler():
  553  struct rknpu_job *job = NULL;
  560  job = subcore_data->job;
  561  if (!job) {
  567  job->irq_entry[core_index] = true;
  572  job->int_status[core_index] = status;
  574  if (rknpu_fuzz_status(status) != job->int_mask[core_index]) {
  578  job->int_mask[core_index],
  587  rknpu_job_done(job, 0, core_index);
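
The interrupt path closes the loop with the int_mask recorded in rknpu_job_commit_pc(): the raw status is stored per core, normalized through rknpu_fuzz_status(), and compared against the expected mask of the job's last task; only on a match is the job completed on this core. A rough sketch of the handler body; the status read uses a hypothetical helper, and what the mismatch branch does beyond logging is not visible in the listing:

        struct rknpu_job *job = subcore_data->job;
        u32 status;

        if (!job)
                return IRQ_HANDLED;                              /* nothing active on this core */

        job->irq_entry[core_index] = true;

        status = rknpu_read_irq_status(rknpu_dev, core_index);   /* hypothetical helper */
        job->int_status[core_index] = status;

        if (rknpu_fuzz_status(status) != job->int_mask[core_index]) {
                /* Unexpected status: the listing only shows job->int_mask being
                 * logged (line 578); what follows is not reconstructed here. */
                return IRQ_HANDLED;
        }

        rknpu_job_done(job, 0, core_index);
        return IRQ_HANDLED;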

In rknpu_job_timeout_clean():
  610  struct rknpu_job *job = NULL;
  619  job = subcore_data->job;
  620  if (job &&
  621  ktime_to_ms(ktime_sub(now, job->timestamp)) >=
  622  job->args->timeout) {
  626  subcore_data->job = NULL;
  631  schedule_work(&job->cleanup_work);
  638  job = list_first_entry(
  642  list_del_init(&job->head[i]);
  643  job->in_queue[i] = false;
  645  job = NULL;
  650  } while (job);
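
The fragments in this group describe a per-core timeout sweep: a job that has run longer than its own args->timeout (milliseconds, judging by ktime_to_ms()) is detached from the subcore and handed to the cleanup worker, and the surrounding do/while (lines 638-650) then appears to keep draining jobs left on the todo list. A minimal sketch of the detach step only, with the lock name assumed; the draining loop is not reconstructed:

        ktime_t now = ktime_get();

        spin_lock_irqsave(&rknpu_dev->irq_lock, flags);          /* lock name assumed */
        job = subcore_data->job;
        if (job &&
            ktime_to_ms(ktime_sub(now, job->timestamp)) >= job->args->timeout) {
                /* The running job has exceeded its own timeout: detach it from
                 * the core and let the cleanup worker free it. */
                subcore_data->job = NULL;
                schedule_work(&job->cleanup_work);
        }
        spin_unlock_irqrestore(&rknpu_dev->irq_lock, flags);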

In rknpu_submit():
  659  struct rknpu_job *job = NULL;
  667  job = rknpu_job_alloc(rknpu_dev, args);
  668  if (!job) {
  669  LOG_ERROR("failed to allocate rknpu job!\n");
  706  rknpu_job_free(job);
  713  ret = rknpu_fence_alloc(job);
  715  rknpu_job_free(job);
  718  job->args->fence_fd = rknpu_fence_get_fd(job);
  719  args->fence_fd = job->args->fence_fd;
  723  rknpu_job_free(job);
  729  job->flags |= RKNPU_JOB_ASYNC;
  730  rknpu_job_timeout_clean(rknpu_dev, job->args->core_mask);
  731  rknpu_job_schedule(job);
  732  ret = job->ret;
  734  rknpu_job_abort(job);
  738  rknpu_job_schedule(job);
  740  job->ret = rknpu_job_wait(job);
  742  args->task_counter = job->args->task_counter;
  743  ret = job->ret;
  745  rknpu_job_cleanup(job);
  747  rknpu_job_abort(job);
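
Taken together, the tail of rknpu_submit() shows two submission modes: an asynchronous path (lines 729-734) that returns a fence fd to userspace, sweeps stale jobs with rknpu_job_timeout_clean(), schedules the job, and leaves cleanup to the work item; and a synchronous path (lines 738-745) that schedules the job, blocks in rknpu_job_wait(), copies the task counter back to the caller, and cleans up inline. A condensed sketch of that split; the flag selecting the mode and the exact error handling are assumptions:

        if (args->flags & RKNPU_JOB_NONBLOCK) {                  /* flag name assumed */
                /* Asynchronous submit: userspace waits on the returned fence fd. */
                job->flags |= RKNPU_JOB_ASYNC;
                rknpu_job_timeout_clean(rknpu_dev, job->args->core_mask);
                rknpu_job_schedule(job);
                ret = job->ret;
                if (ret)
                        rknpu_job_abort(job);
                return ret;
        }

        /* Synchronous submit: block until the job finishes or times out. */
        rknpu_job_schedule(job);
        job->ret = rknpu_job_wait(job);

        args->task_counter = job->args->task_counter;
        ret = job->ret;
        if (!ret)
                rknpu_job_cleanup(job);
        else
                rknpu_job_abort(job);

        return ret;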