xref: /OK3568_Linux_fs/kernel/drivers/video/rockchip/rve/rve_job.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) Rockchip Electronics Co., Ltd.
4  *
5  * Author: Huang Lee <Putin.li@rock-chips.com>
6  */
7 
8 #define pr_fmt(fmt) "rve_job: " fmt
9 
10 #include "rve_job.h"
11 #include "rve_fence.h"
12 #include "rve_reg.h"
13 
14 struct rve_job *
15 rve_scheduler_get_pending_job_list(struct rve_scheduler_t *scheduler)
16 {
17 	unsigned long flags;
18 	struct rve_job *job;
19 
20 	spin_lock_irqsave(&scheduler->irq_lock, flags);
21 
22 	job = list_first_entry_or_null(&scheduler->todo_list,
23 		struct rve_job, head);
24 
25 	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
26 
27 	return job;
28 }
29 
30 struct rve_job *
31 rve_scheduler_get_running_job(struct rve_scheduler_t *scheduler)
32 {
33 	unsigned long flags;
34 	struct rve_job *job;
35 
36 	spin_lock_irqsave(&scheduler->irq_lock, flags);
37 
38 	job = scheduler->running_job;
39 
40 	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
41 
42 	return job;
43 }
44 
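/*
 * Per-pid accounting: charge the hardware time of the finished job to the
 * matching slot in session.pid_info[], claiming an empty slot on first use.
 * If no slot matches, the slot with the least accumulated hw time is reused
 * for the new pid. Called with scheduler->irq_lock held (from rve_job_done).
 */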
45 static void rve_scheduler_set_pid_info(struct rve_job *job, ktime_t now)
46 {
47 	struct rve_scheduler_t *scheduler;
48 	bool pid_match_flag = false;
49 	ktime_t tmp = 0;
50 	int pid_mark = 0, i;
51 
52 	scheduler = rve_job_get_scheduler(job);
53 
54 	for (i = 0; i < RVE_MAX_PID_INFO; i++) {
55 		if (scheduler->session.pid_info[i].pid == 0)
56 			scheduler->session.pid_info[i].pid = job->pid;
57 
58 		if (scheduler->session.pid_info[i].pid == job->pid) {
59 			pid_match_flag = true;
60 			scheduler->session.pid_info[i].hw_time_total +=
61 				ktime_us_delta(now, job->hw_running_time);
62 			break;
63 		}
64 	}
65 
66 	if (!pid_match_flag) {
67 		/* evict the slot with the least accumulated hw time */
68 		tmp = scheduler->session.pid_info[0].hw_time_total;
69 		pid_mark = 0;
70 		for (i = 1; i < RVE_MAX_PID_INFO; i++) {
71 			if (scheduler->session.pid_info[i].hw_time_total < tmp) {
72 				tmp = scheduler->session.pid_info[i].hw_time_total;
73 				pid_mark = i;
74 			}
75 		}
76 
77 		scheduler->session.pid_info[pid_mark].pid = job->pid;
78 		scheduler->session.pid_info[pid_mark].hw_time_total +=
79 					ktime_us_delta(now, job->hw_running_time);
80 	}
81 }
82 
83 struct rve_scheduler_t *rve_job_get_scheduler(struct rve_job *job)
84 {
85 	return job->scheduler;
86 }
87 
88 struct rve_internal_ctx_t *rve_job_get_internal_ctx(struct rve_job *job)
89 {
90 	return job->ctx;
91 }
92 
93 static void rve_job_free(struct rve_job *job)
94 {
95 #ifdef CONFIG_SYNC_FILE
96 	if (job->out_fence)
97 		dma_fence_put(job->out_fence);
98 #endif
99 
100 	free_page((unsigned long)job);
101 }
102 
103 static int rve_job_cleanup(struct rve_job *job)
104 {
105 	ktime_t now = ktime_get();
106 
107 	if (DEBUGGER_EN(TIME)) {
108 		pr_info("(pid:%d) job clean use time = %lld\n", job->pid,
109 			ktime_us_delta(now, job->timestamp));
110 	}
111 	rve_job_free(job);
112 
113 	return 0;
114 }
115 
116 void rve_job_session_destroy(struct rve_session *session)
117 {
118 	struct rve_scheduler_t *scheduler = NULL;
119 	struct rve_job *job_pos, *job_q;
120 	int i;
121 
122 	unsigned long flags;
123 
124 	for (i = 0; i < rve_drvdata->num_of_scheduler; i++) {
125 		scheduler = rve_drvdata->scheduler[i];
126 
127 		spin_lock_irqsave(&scheduler->irq_lock, flags);
128 
129 		list_for_each_entry_safe(job_pos, job_q, &scheduler->todo_list, head) {
130 			if (session == job_pos->session) {
131 				list_del(&job_pos->head);
132 
133 				spin_unlock_irqrestore(&scheduler->irq_lock, flags);
134 
135 				rve_job_free(job_pos);
136 
137 				spin_lock_irqsave(&scheduler->irq_lock, flags);
138 			}
139 		}
140 
141 		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
142 	}
143 }
144 
145 static struct rve_job *rve_job_alloc(struct rve_internal_ctx_t *ctx)
146 {
147 	struct rve_job *job = NULL;
148 
149 	job = (struct rve_job *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
150 	if (!job)
151 		return NULL;
152 
153 #ifdef CONFIG_SYNC_FILE
154 	spin_lock_init(&job->fence_lock);
155 #endif
156 	INIT_LIST_HEAD(&job->head);
157 
158 	job->timestamp = ktime_get();
159 	job->pid = current->pid;
160 	job->regcmd_data = &ctx->regcmd_data[ctx->running_job_count];
161 
162 	job->scheduler = rve_drvdata->scheduler[0];
163 	job->core = rve_drvdata->scheduler[0]->core;
164 	job->ctx = ctx;
165 	ctx->scheduler = job->scheduler;
166 	job->session = ctx->session;
167 
168 	if (ctx->priority > 0) {
169 		if (ctx->priority > RVE_SCHED_PRIORITY_MAX)
170 			job->priority = RVE_SCHED_PRIORITY_MAX;
171 		else
172 			job->priority = ctx->priority;
173 	}
174 
175 	return job;
176 }
177 
178 static void rve_job_dump_info(struct rve_job *job)
179 {
180 	pr_info("job: priority = %d, core = %d\n",
181 		job->priority, job->core);
182 }
183 
184 static int rve_job_run(struct rve_job *job)
185 {
186 	struct rve_scheduler_t *scheduler;
187 	int ret = 0;
188 
189 	scheduler = rve_job_get_scheduler(job);
190 
191 #ifndef RVE_PD_AWAYS_ON
192 	/* enable power */
193 	ret = rve_power_enable(scheduler);
194 	if (ret < 0) {
195 		pr_err("power enable failed");
196 		return ret;
197 	}
198 #endif
199 
200 	ret = scheduler->ops->init_reg(job);
201 	if (ret < 0) {
202 		pr_err("init reg failed");
203 		goto failed;
204 	}
205 
206 	ret = scheduler->ops->set_reg(job, scheduler);
207 	if (ret < 0) {
208 		pr_err("set reg failed");
209 		goto failed;
210 	}
211 
212 	/* for debug */
213 	if (DEBUGGER_EN(MSG))
214 		rve_job_dump_info(job);
215 
216 	return ret;
217 
218 failed:
219 #ifndef RVE_PD_AWAYS_ON
220 	rve_power_disable(scheduler);
221 #endif
222 
223 	return ret;
224 }
225 
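/*
 * Take the next job off the scheduler's todo_list and start it on the
 * hardware. Does nothing while a job is still running or the list is empty.
 * If init_reg()/set_reg() fails before the hardware is kicked, the job is
 * signalled with its error code and the next pending job is tried.
 */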
226 static void rve_job_next(struct rve_scheduler_t *scheduler)
227 {
228 	struct rve_job *job = NULL;
229 	unsigned long flags;
230 
231 next_job:
232 	spin_lock_irqsave(&scheduler->irq_lock, flags);
233 
234 	if (scheduler->running_job ||
235 		list_empty(&scheduler->todo_list)) {
236 		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
237 		return;
238 	}
239 
240 	job = list_first_entry(&scheduler->todo_list, struct rve_job, head);
241 
242 	list_del_init(&job->head);
243 
244 	scheduler->job_count--;
245 
246 	scheduler->running_job = job;
247 
248 	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
249 
250 	job->ret = rve_job_run(job);
251 
252 	/* if an error occurred before the hw started */
253 	if (job->ret < 0) {
254 		pr_err("some error on rve_job_run before hw start, %s(%d)\n",
255 			__func__, __LINE__);
256 
257 		spin_lock_irqsave(&scheduler->irq_lock, flags);
258 
259 		scheduler->running_job = NULL;
260 
261 		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
262 
263 		rve_internal_ctx_signal(job);
264 
265 		goto next_job;
266 	}
267 }
268 
269 static void rve_job_finish_and_next(struct rve_job *job, int ret)
270 {
271 	ktime_t now = ktime_get();
272 	struct rve_scheduler_t *scheduler;
273 
274 	job->ret = ret;
275 
276 	scheduler = rve_job_get_scheduler(job);
277 
278 	if (DEBUGGER_EN(TIME)) {
279 		pr_info("hw use time = %lld\n", ktime_us_delta(now, job->hw_running_time));
280 		pr_info("(pid:%d) job done use time = %lld\n", job->pid,
281 			ktime_us_delta(now, job->timestamp));
282 	}
283 
284 	rve_internal_ctx_signal(job);
285 
286 	rve_job_next(scheduler);
287 
288 #ifndef RVE_PD_AWAYS_ON
289 	rve_power_disable(scheduler);
290 #endif
291 }
292 
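/*
 * Completion path, run from the IRQ handling thread once the hardware has
 * finished (or failed) a job: updates busy-time and per-ctx debug statistics,
 * copies the CFG registers back into the job's regcmd_data so they can be
 * returned to userspace, clears the LLP enable bit, then signals the ctx and
 * launches the next pending job.
 */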
293 void rve_job_done(struct rve_scheduler_t *scheduler, int ret)
294 {
295 	struct rve_job *job;
296 	unsigned long flags;
297 	u32 error_flag;
298 	uint32_t *cmd_reg;
299 	int i;
300 
301 	ktime_t now = ktime_get();
302 
303 	spin_lock_irqsave(&scheduler->irq_lock, flags);
304 
305 	job = scheduler->running_job;
306 	scheduler->running_job = NULL;
307 
308 	scheduler->timer.busy_time += ktime_us_delta(now, job->hw_recoder_time);
309 
310 	rve_scheduler_set_pid_info(job, now);
311 
312 	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
313 
314 	spin_lock_irqsave(&job->ctx->lock, flags);
315 
316 	job->ctx->debug_info.max_cost_time_per_sec =
317 		max(job->ctx->debug_info.last_job_hw_use_time,
318 			job->ctx->debug_info.max_cost_time_per_sec);
319 	job->ctx->debug_info.last_job_hw_use_time = ktime_us_delta(now, job->hw_running_time);
320 	job->ctx->debug_info.hw_time_total += job->ctx->debug_info.last_job_hw_use_time;
321 	job->ctx->debug_info.last_job_use_time = ktime_us_delta(now, job->timestamp);
322 
323 	spin_unlock_irqrestore(&job->ctx->lock, flags);
324 
325 	/* record CFG REG copy to user */
326 	cmd_reg = job->regcmd_data->cmd_reg;
327 	for (i = 0; i < 40; i++)
328 		cmd_reg[18 + i] = rve_read(RVE_CFG_REG + i * 4, scheduler);
329 
330 	error_flag = rve_read(RVE_SWREG6_IVE_WORK_STA, scheduler);
331 
332 	rve_get_monitor_info(job);
333 
334 	if (DEBUGGER_EN(MSG))
335 		pr_info("irq thread work_status[%.8x]\n", error_flag);
336 
337 	/* clear the LLP enable bit, TODO: support pause mode */
338 	rve_write(0, RVE_SWLTB3_ENABLE, scheduler);
339 
340 	rve_job_finish_and_next(job, ret);
341 }
342 
343 static void rve_job_timeout_clean(struct rve_scheduler_t *scheduler)
344 {
345 	unsigned long flags;
346 	struct rve_job *job = NULL;
347 	ktime_t now = ktime_get();
348 
349 	spin_lock_irqsave(&scheduler->irq_lock, flags);
350 
351 	job = scheduler->running_job;
352 	if (job && (job->flags & RVE_ASYNC) &&
353 	   (ktime_to_ms(ktime_sub(now, job->hw_running_time)) >= RVE_ASYNC_TIMEOUT_DELAY)) {
354 		scheduler->running_job = NULL;
355 
356 		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
357 
358 		scheduler->ops->soft_reset(scheduler);
359 
360 		rve_internal_ctx_signal(job);
361 
362 #ifndef RVE_PD_AWAYS_ON
363 		rve_power_disable(scheduler);
364 #endif
365 	} else {
366 		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
367 	}
368 }
369 
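/*
 * Queue the job on its scheduler's todo_list. Default-priority jobs are
 * simply appended; prioritised jobs are inserted at the first entry with a
 * lower priority, and the entries behind the insertion point get their
 * priority bumped so they are not starved. rve_job_next() is then called to
 * start the hardware if it is idle.
 */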
370 static struct rve_scheduler_t *rve_job_schedule(struct rve_job *job)
371 {
372 	unsigned long flags;
373 	struct rve_scheduler_t *scheduler = NULL;
374 	struct rve_job *job_pos;
375 	bool first_match = 0;
376 	bool first_match = false;
377 	scheduler = rve_job_get_scheduler(job);
378 	if (scheduler == NULL) {
379 		pr_err("failed to get scheduler, %s(%d)\n", __func__, __LINE__);
380 		return NULL;
381 	}
382 
383 	/* only async jobs are cleaned up on timeout */
384 	rve_job_timeout_clean(scheduler);
385 
386 	spin_lock_irqsave(&scheduler->irq_lock, flags);
387 
388 	/* priority policy set by userspace */
389 	if (list_empty(&scheduler->todo_list)
390 		|| (job->priority == RVE_SCHED_PRIORITY_DEFAULT)) {
391 		list_add_tail(&job->head, &scheduler->todo_list);
392 	} else {
393 		list_for_each_entry(job_pos, &scheduler->todo_list, head) {
394 			if (job->priority > job_pos->priority &&
395 					(!first_match)) {
396 				list_add(&job->head, &job_pos->head);
397 				first_match = true;
398 			}
399 
400 			/*
401 			 * Increase the priority of subsequent tasks
402 			 * after inserting into the list
403 			 */
404 			if (first_match)
405 				job_pos->priority++;
406 		}
407 
408 		if (!first_match)
409 			list_add_tail(&job->head, &scheduler->todo_list);
410 	}
411 
412 	scheduler->job_count++;
413 
414 	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
415 
416 	rve_job_next(scheduler);
417 
418 	return scheduler;
419 }
420 
421 static void rve_job_abort_running(struct rve_job *job)
422 {
423 	unsigned long flags;
424 	struct rve_scheduler_t *scheduler;
425 
426 	scheduler = rve_job_get_scheduler(job);
427 
428 	spin_lock_irqsave(&scheduler->irq_lock, flags);
429 
430 	/* invalid job */
431 	if (job == scheduler->running_job)
432 		scheduler->running_job = NULL;
433 
434 	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
435 
436 	rve_job_cleanup(job);
437 }
438 
439 static void rve_job_abort_invalid(struct rve_job *job)
440 {
441 	rve_job_cleanup(job);
442 }
443 
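/*
 * Synchronous wait until every job of the ctx has finished. On timeout the
 * hardware is soft-reset and -EBUSY is returned.
 */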
444 static inline int rve_job_wait(struct rve_job *job)
445 {
446 	struct rve_scheduler_t *scheduler;
447 
448 	int left_time;
449 	ktime_t now;
450 	int ret;
451 
452 	scheduler = rve_job_get_scheduler(job);
453 
454 	left_time = wait_event_timeout(scheduler->job_done_wq,
455 		job->ctx->finished_job_count == job->ctx->cmd_num,
456 		RVE_SYNC_TIMEOUT_DELAY * job->ctx->cmd_num);
457 
458 	switch (left_time) {
459 	case 0:
460 		pr_err("%s timeout", __func__);
461 		scheduler->ops->soft_reset(scheduler);
462 		ret = -EBUSY;
463 		break;
464 	case -ERESTARTSYS:
465 		ret = -ERESTARTSYS;
466 		break;
467 	default:
468 		ret = 0;
469 		break;
470 	}
471 
472 	now = ktime_get();
473 
474 	if (DEBUGGER_EN(TIME))
475 		pr_info("%s use time = %lld\n", __func__,
476 			 ktime_to_us(ktime_sub(now, job->hw_running_time)));
477 
478 	return ret;
479 }
480 
481 #ifdef CONFIG_SYNC_FILE
482 static void rve_job_input_fence_signaled(struct dma_fence *fence,
483 					 struct dma_fence_cb *_waiter)
484 {
485 	struct rve_fence_waiter *waiter = (struct rve_fence_waiter *)_waiter;
486 	struct rve_scheduler_t *scheduler = NULL;
487 
488 	ktime_t now;
489 
490 	now = ktime_get();
491 
492 	if (DEBUGGER_EN(TIME))
493 		pr_err("rve job wait in_fence signal use time = %lld\n",
494 			ktime_to_us(ktime_sub(now, waiter->job->timestamp)));
495 
496 	scheduler = rve_job_schedule(waiter->job);
497 
498 	if (scheduler == NULL)
499 		pr_err("failed to get scheduler, %s(%d)\n", __func__, __LINE__);
500 
501 	kfree(waiter);
502 }
503 #endif
504 
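/*
 * Configure an internal ctx from a userspace rve_user_ctx_t: the register
 * command buffers are copied from userspace into ctx->regcmd_data. The
 * expected userspace sequence is presumably alloc
 * (rve_internal_ctx_alloc_to_get_idr_id) -> config -> commit, with
 * rve_job_cancel_by_user_ctx() as the explicit teardown when auto-cancel
 * is disabled.
 */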
505 int rve_job_config_by_user_ctx(struct rve_user_ctx_t *user_ctx)
506 {
507 	struct rve_pending_ctx_manager *ctx_manager;
508 	struct rve_internal_ctx_t *ctx;
509 	int ret = 0;
510 	unsigned long flags;
511 
512 	ctx_manager = rve_drvdata->pend_ctx_manager;
513 
514 	ctx = rve_internal_ctx_lookup(ctx_manager, user_ctx->id);
515 	if (IS_ERR_OR_NULL(ctx)) {
516 		pr_err("can not find internal ctx from id[%d]", user_ctx->id);
517 		return -EINVAL;
518 	}
519 
520 	spin_lock_irqsave(&ctx->lock, flags);
521 
522 	if (ctx->is_running) {
523 		pr_err("can not re-config when ctx is running");
524 		spin_unlock_irqrestore(&ctx->lock, flags);
525 		return -EFAULT;
526 	}
527 
528 	spin_unlock_irqrestore(&ctx->lock, flags);
529 
530 	/* TODO: user cmd_num */
531 	user_ctx->cmd_num = 1;
532 
533 	if (ctx->regcmd_data == NULL) {
534 		ctx->regcmd_data = kmalloc_array(user_ctx->cmd_num,
535 			sizeof(struct rve_cmd_reg_array_t), GFP_KERNEL);
536 		if (ctx->regcmd_data == NULL) {
537 			pr_err("regcmd_data alloc error!\n");
538 			return -ENOMEM;
539 		}
540 	}
541 
542 	if (unlikely(copy_from_user(ctx->regcmd_data,
543 					u64_to_user_ptr(user_ctx->regcmd_data),
544 				    sizeof(struct rve_cmd_reg_array_t) * user_ctx->cmd_num))) {
545 		pr_err("regcmd_data copy_from_user failed\n");
546 		ret = -EFAULT;
547 
548 		goto err_free_regcmd_data;
549 	}
550 
551 	ctx->sync_mode = user_ctx->sync_mode;
552 	ctx->cmd_num = user_ctx->cmd_num;
553 	ctx->priority = user_ctx->priority;
554 	ctx->in_fence_fd = user_ctx->in_fence_fd;
555 
556 	/* TODO: cmd addr */
557 
558 	return ret;
559 
560 err_free_regcmd_data:
561 	kfree(ctx->regcmd_data);
	ctx->regcmd_data = NULL;
562 	return ret;
563 }
564 
565 int rve_job_commit_by_user_ctx(struct rve_user_ctx_t *user_ctx)
566 {
567 	struct rve_pending_ctx_manager *ctx_manager;
568 	struct rve_internal_ctx_t *ctx;
569 	int ret = 0;
570 	unsigned long flags;
571 	int i;
572 
573 	ctx_manager = rve_drvdata->pend_ctx_manager;
574 
575 	ctx = rve_internal_ctx_lookup(ctx_manager, user_ctx->id);
576 	if (IS_ERR_OR_NULL(ctx)) {
577 		pr_err("can not find internal ctx from id[%d]", user_ctx->id);
578 		return -EINVAL;
579 	}
580 
581 	spin_lock_irqsave(&ctx->lock, flags);
582 
583 	if (ctx->is_running) {
584 		pr_err("can not re-config when ctx is running");
585 		spin_unlock_irqrestore(&ctx->lock, flags);
586 		return -EFAULT;
587 	}
588 
589 	/* Reset */
590 	ctx->finished_job_count = 0;
591 	ctx->running_job_count = 0;
592 	ctx->is_running = true;
593 	ctx->disable_auto_cancel = user_ctx->disable_auto_cancel;
594 
595 	ctx->sync_mode = user_ctx->sync_mode;
596 	if (ctx->sync_mode == 0)
597 		ctx->sync_mode = RVE_SYNC;
598 
599 	spin_unlock_irqrestore(&ctx->lock, flags);
600 
601 	for (i = 0; i < ctx->cmd_num; i++) {
602 		ret = rve_job_commit(ctx);
603 		if (ret < 0) {
604 			pr_err("rve_job_commit failed, i = %d\n", i);
605 			return -EFAULT;
606 		}
607 
608 		ctx->running_job_count++;
609 	}
610 
611 	user_ctx->out_fence_fd = ctx->out_fence_fd;
612 
613 	if (unlikely(copy_to_user(u64_to_user_ptr(user_ctx->regcmd_data),
614 				  ctx->regcmd_data,
615 				  sizeof(struct rve_cmd_reg_array_t) * ctx->cmd_num))) {
616 		pr_err("ctx->regcmd_data copy_to_user failed\n");
617 		return -EFAULT;
618 	}
619 
620 	if (!ctx->disable_auto_cancel && ctx->sync_mode == RVE_SYNC)
621 		kref_put(&ctx->refcount, rve_internal_ctx_kref_release);
622 
623 	return ret;
624 }
625 
626 int rve_job_cancel_by_user_ctx(uint32_t ctx_id)
627 {
628 	struct rve_pending_ctx_manager *ctx_manager;
629 	struct rve_internal_ctx_t *ctx;
630 	int ret = 0;
631 
632 	ctx_manager = rve_drvdata->pend_ctx_manager;
633 
634 	ctx = rve_internal_ctx_lookup(ctx_manager, ctx_id);
635 	if (IS_ERR_OR_NULL(ctx)) {
636 		pr_err("can not find internal ctx from id[%d]", ctx_id);
637 		return -EINVAL;
638 	}
639 
640 	kref_put(&ctx->refcount, rve_internal_ctx_kref_release);
641 
642 	return ret;
643 }
644 
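/*
 * Build one job for the ctx and hand it to the scheduler. In RVE_ASYNC mode
 * an out-fence is created (and an optional in-fence is waited on through a
 * dma_fence callback) and the call returns immediately; in RVE_SYNC mode the
 * call blocks in rve_job_wait() until the job has completed.
 */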
645 int rve_job_commit(struct rve_internal_ctx_t *ctx)
646 {
647 	struct rve_job *job = NULL;
648 	struct rve_scheduler_t *scheduler = NULL;
649 #ifdef CONFIG_SYNC_FILE
650 	struct dma_fence *in_fence;
651 #endif
652 	int ret = 0;
653 
654 	job = rve_job_alloc(ctx);
655 	if (!job) {
656 		pr_err("failed to alloc rve job!\n");
657 		return -ENOMEM;
658 	}
659 
660 	if (ctx->sync_mode == RVE_ASYNC) {
661 #ifdef CONFIG_SYNC_FILE
662 		job->flags |= RVE_ASYNC;
663 
664 		if (!ctx->out_fence) {
665 			ret = rve_out_fence_alloc(job);
666 			if (ret) {
667 				rve_job_free(job);
668 				return ret;
669 			}
670 		}
671 
672 		ctx->out_fence = job->out_fence;
673 
674 		ctx->out_fence_fd = rve_out_fence_get_fd(job);
675 
676 		if (ctx->out_fence_fd < 0)
677 			pr_err("out fence get fd failed");
678 
679 		if (DEBUGGER_EN(MSG))
680 			pr_info("in_fence_fd = %d", ctx->in_fence_fd);
681 
682 		/* if the input fence is available */
683 		if (ctx->in_fence_fd > 0) {
684 			in_fence = rve_get_input_fence(
685 				ctx->in_fence_fd);
686 			if (!in_fence) {
687 				pr_err("%s: failed to get input dma_fence\n",
688 					 __func__);
689 				rve_job_free(job);
690 				return -EINVAL;
691 			}
692 
693 			/* close input fence fd */
694 			ksys_close(ctx->in_fence_fd);
695 
696 			ret = dma_fence_get_status(in_fence);
697 			/* ret = 1: fence has been signaled */
698 			if (ret == 1) {
699 				scheduler = rve_job_schedule(job);
700 
701 				if (scheduler == NULL) {
702 					pr_err("failed to get scheduler, %s(%d)\n",
703 						 __func__, __LINE__);
704 					goto invalid_job;
705 				}
706 				/* if input fence is valid */
707 			} else if (ret == 0) {
708 				ret = rve_add_dma_fence_callback(job,
709 					in_fence, rve_job_input_fence_signaled);
710 				if (ret < 0) {
711 					pr_err("%s: failed to add fence callback\n",
712 						 __func__);
713 					rve_job_free(job);
714 					return ret;
715 				}
716 			} else {
717 				pr_err("%s: fence status error\n", __func__);
718 				rve_job_free(job);
719 				return ret;
720 			}
721 		} else {
722 			scheduler = rve_job_schedule(job);
723 
724 			if (scheduler == NULL) {
725 				pr_err("failed to get scheduler, %s(%d)\n",
726 					 __func__, __LINE__);
727 				goto invalid_job;
728 			}
729 		}
730 
731 		return ret;
732 #else
733 		pr_err("can not support ASYNC mode, please enable CONFIG_SYNC_FILE");
734 		return -EFAULT;
735 #endif
736 
737 	/* RVE_SYNC: wait until the job finishes */
738 	} else if (ctx->sync_mode == RVE_SYNC) {
739 		scheduler = rve_job_schedule(job);
740 
741 		if (scheduler == NULL) {
742 			pr_err("failed to get scheduler, %s(%d)\n", __func__,
743 				 __LINE__);
744 			goto invalid_job;
745 		}
746 
747 		ret = job->ret;
748 		if (ret < 0) {
749 			pr_err("some error on job, %s(%d)\n", __func__,
750 				 __LINE__);
751 			goto running_job_abort;
752 		}
753 
754 		ret = rve_job_wait(job);
755 		if (ret < 0)
756 			goto running_job_abort;
757 
758 		rve_job_cleanup(job);
759 	}
760 	return ret;
761 
762 invalid_job:
763 	rve_job_abort_invalid(job);
764 	return ret;
765 
766 /* only used by SYNC mode */
767 running_job_abort:
768 	rve_job_abort_running(job);
769 	return ret;
770 }
771 
772 struct rve_internal_ctx_t *
773 rve_internal_ctx_lookup(struct rve_pending_ctx_manager *ctx_manager, uint32_t id)
774 {
775 	struct rve_internal_ctx_t *ctx = NULL;
776 	unsigned long flags;
777 
778 	spin_lock_irqsave(&ctx_manager->lock, flags);
779 
780 	ctx = idr_find(&ctx_manager->ctx_id_idr, id);
781 
782 	spin_unlock_irqrestore(&ctx_manager->lock, flags);
783 
784 	if (ctx == NULL)
785 		pr_err("can not find internal ctx from id[%d]", id);
786 
787 	return ctx;
788 }
789 
790 /*
791  * Called at driver close to release the internal ctx's id references.
792  */
793 static int rve_internal_ctx_free_remove_idr_cb(int id, void *ptr, void *data)
794 {
795 	struct rve_internal_ctx_t *ctx = ptr;
796 
797 	idr_remove(&rve_drvdata->pend_ctx_manager->ctx_id_idr, ctx->id);
798 	kfree(ctx);
799 
800 	return 0;
801 }
802 
803 static int rve_internal_ctx_free_remove_idr(struct rve_internal_ctx_t *ctx)
804 {
805 	struct rve_pending_ctx_manager *ctx_manager;
806 	unsigned long flags;
807 
808 	ctx_manager = rve_drvdata->pend_ctx_manager;
809 
810 	spin_lock_irqsave(&ctx_manager->lock, flags);
811 
812 	ctx_manager->ctx_count--;
813 	idr_remove(&ctx_manager->ctx_id_idr, ctx->id);
814 
815 	spin_unlock_irqrestore(&ctx_manager->lock, flags);
816 
817 	kfree(ctx);
818 
819 	return 0;
820 }
821 
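/*
 * Account one finished job to its ctx. Once all cmd_num jobs are done, the
 * out-fence is signalled, waiters on job_done_wq are woken and, for async
 * jobs, the job is cleaned up and the ctx reference dropped unless
 * auto-cancel is disabled.
 */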
822 int rve_internal_ctx_signal(struct rve_job *job)
823 {
824 	struct rve_internal_ctx_t *ctx;
825 	struct rve_scheduler_t *scheduler;
826 	int finished_job_count;
827 	unsigned long flags;
828 
829 	scheduler = rve_job_get_scheduler(job);
830 	if (scheduler == NULL) {
831 		pr_err("failed to get scheduler, %s(%d)\n", __func__, __LINE__);
832 		return -EFAULT;
833 	}
834 
835 	ctx = rve_job_get_internal_ctx(job);
836 	if (IS_ERR_OR_NULL(ctx)) {
837 		pr_err("can not find internal ctx");
838 		return -EINVAL;
839 	}
840 
841 	ctx->regcmd_data = job->regcmd_data;
842 
843 	spin_lock_irqsave(&ctx->lock, flags);
844 
845 	finished_job_count = ++ctx->finished_job_count;
846 
847 	spin_unlock_irqrestore(&ctx->lock, flags);
848 
849 	if (finished_job_count >= ctx->cmd_num) {
850 #ifdef CONFIG_SYNC_FILE
851 		if (ctx->out_fence)
852 			dma_fence_signal(ctx->out_fence);
853 #endif
854 
855 		job->flags |= RVE_JOB_DONE;
856 
857 		wake_up(&scheduler->job_done_wq);
858 
859 		spin_lock_irqsave(&ctx->lock, flags);
860 
861 		ctx->is_running = false;
862 		ctx->out_fence = NULL;
863 
864 		spin_unlock_irqrestore(&ctx->lock, flags);
865 
866 		if (job->flags & RVE_ASYNC) {
867 			rve_job_cleanup(job);
868 			if (!ctx->disable_auto_cancel)
869 				kref_put(&ctx->refcount, rve_internal_ctx_kref_release);
870 		}
871 	}
872 
873 	return 0;
874 }
875 
876 int rve_internal_ctx_alloc_to_get_idr_id(struct rve_session *session)
877 {
878 	struct rve_pending_ctx_manager *ctx_manager;
879 	struct rve_internal_ctx_t *ctx;
880 	unsigned long flags;
881 
882 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
883 	if (ctx == NULL) {
884 		pr_err("can not kzalloc for rve_internal_ctx_t\n");
885 		return -ENOMEM;
886 	}
887 
888 	ctx_manager = rve_drvdata->pend_ctx_manager;
889 	if (ctx_manager == NULL) {
890 		pr_err("rve_pending_ctx_manager is null!\n");
891 		goto failed;
892 	}
893 
894 	spin_lock_init(&ctx->lock);
895 
896 	/*
897 	 * Get the user-visible handle using idr. Preload and perform
898 	 * allocation under our spinlock.
899 	 */
900 
901 	idr_preload(GFP_KERNEL);
902 
903 	spin_lock_irqsave(&ctx_manager->lock, flags);
904 
905 	ctx->id = idr_alloc(&ctx_manager->ctx_id_idr, ctx, 1, 0, GFP_ATOMIC);
906 	if (ctx->id < 0) {
907 		pr_err("idr_alloc failed");
908 		spin_unlock_irqrestore(&ctx_manager->lock, flags);
		idr_preload_end();
909 		goto failed;
910 	}
911 
912 	ctx_manager->ctx_count++;
913 
914 	ctx->debug_info.pid = current->pid;
915 	ctx->debug_info.timestamp = ktime_get();
916 	ctx->session = session;
917 
918 	spin_unlock_irqrestore(&ctx_manager->lock, flags);
919 
920 	idr_preload_end();
921 
922 	ctx->regcmd_data = NULL;
923 
924 	kref_init(&ctx->refcount);
925 
926 	return ctx->id;
927 
928 failed:
929 	kfree(ctx);
930 	return -EFAULT;
931 }
932 
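/*
 * Final kref release of an internal ctx. If the ctx is still running, its
 * jobs are removed from the todo_list and a job currently on the hardware
 * triggers a soft reset of that core before being signalled. The ctx,
 * including its regcmd_data, is then freed and removed from the idr.
 */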
933 void rve_internal_ctx_kref_release(struct kref *ref)
934 {
935 	struct rve_internal_ctx_t *ctx;
936 	struct rve_scheduler_t *scheduler = NULL;
937 	struct rve_job *job_pos, *job_q, *job;
938 	int i;
939 	bool need_reset = false;
940 	unsigned long flags;
941 	ktime_t now = ktime_get();
942 
943 	ctx = container_of(ref, struct rve_internal_ctx_t, refcount);
944 
945 	spin_lock_irqsave(&ctx->lock, flags);
946 	if (!ctx->is_running || ctx->finished_job_count >= ctx->cmd_num) {
947 		spin_unlock_irqrestore(&ctx->lock, flags);
948 		goto free_ctx;
949 	}
950 	spin_unlock_irqrestore(&ctx->lock, flags);
951 
952 	for (i = 0; i < rve_drvdata->num_of_scheduler; i++) {
953 		scheduler = rve_drvdata->scheduler[i];
954 
955 		spin_lock_irqsave(&scheduler->irq_lock, flags);
956 
957 		list_for_each_entry_safe(job_pos, job_q, &scheduler->todo_list, head) {
958 			if (ctx->id == job_pos->ctx->id) {
959 				job = job_pos;
960 				list_del_init(&job_pos->head);
961 
962 				scheduler->job_count--;
963 			}
964 		}
965 
966 		/* for load */
967 		if (scheduler->running_job) {
968 			job = scheduler->running_job;
969 
970 			if (job->ctx->id == ctx->id) {
971 				scheduler->running_job = NULL;
972 				scheduler->timer.busy_time += ktime_us_delta(now, job->hw_recoder_time);
973 				need_reset = true;
974 			}
975 		}
976 
977 		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
978 
979 		if (need_reset) {
980 			pr_err("reset core[%d] by user cancel", scheduler->core);
981 			scheduler->ops->soft_reset(scheduler);
982 
983 			rve_job_finish_and_next(job, 0);
984 		}
985 	}
986 
987 free_ctx:
988 	kfree(ctx->regcmd_data);
989 	rve_internal_ctx_free_remove_idr(ctx);
990 }
991 
992 int rve_ctx_manager_init(struct rve_pending_ctx_manager **ctx_manager_session)
993 {
994 	struct rve_pending_ctx_manager *ctx_manager = NULL;
995 
996 	*ctx_manager_session = kzalloc(sizeof(struct rve_pending_ctx_manager), GFP_KERNEL);
997 	if (*ctx_manager_session == NULL) {
998 		pr_err("can not kzalloc for rve_pending_ctx_manager\n");
999 		return -ENOMEM;
1000 	}
1001 
1002 	ctx_manager = *ctx_manager_session;
1003 
1004 	spin_lock_init(&ctx_manager->lock);
1005 
1006 	idr_init_base(&ctx_manager->ctx_id_idr, 1);
1007 
1008 	return 0;
1009 }
1010 
1011 int rve_ctx_manager_remove(struct rve_pending_ctx_manager **ctx_manager_session)
1012 {
1013 	struct rve_pending_ctx_manager *ctx_manager = *ctx_manager_session;
1014 	unsigned long flags;
1015 
1016 	spin_lock_irqsave(&ctx_manager->lock, flags);
1017 
1018 	idr_for_each(&ctx_manager->ctx_id_idr, &rve_internal_ctx_free_remove_idr_cb, ctx_manager);
1019 	idr_destroy(&ctx_manager->ctx_id_idr);
1020 
1021 	spin_unlock_irqrestore(&ctx_manager->lock, flags);
1022 
1023 	kfree(*ctx_manager_session);
1024 
1025 	*ctx_manager_session = NULL;
1026 
1027 	return 0;
1028 }
1029