1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) Rockchip Electronics Co., Ltd.
4  *
5  * Author: Huang Lee <Putin.li@rock-chips.com>
6  */
7 
8 #define pr_fmt(fmt) "rga_job: " fmt
9 
10 #include "rga_job.h"
11 #include "rga_fence.h"
12 #include "rga_dma_buf.h"
13 #include "rga_mm.h"
14 #include "rga_iommu.h"
15 #include "rga_debugger.h"
16 
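/*
 * Rough request lifecycle, as implemented below: a request id is obtained
 * via rga_request_alloc(), a task list is attached with
 * rga_request_config()/rga_request_kernel_config(), and the request is
 * started with rga_request_submit(). Each task becomes an rga_job that is
 * queued on a scheduler's todo list and launched by rga_job_next(); the
 * completion path (driven from the hardware interrupt elsewhere in the
 * driver) goes through rga_job_done() and rga_request_release_signal(),
 * which wakes waiters and signals the release fence.
 */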
17 static void rga_job_free(struct rga_job *job)
18 {
19 	free_page((unsigned long)job);
20 }
21 
22 static void rga_job_kref_release(struct kref *ref)
23 {
24 	struct rga_job *job;
25 
26 	job = container_of(ref, struct rga_job, refcount);
27 
28 	rga_job_free(job);
29 }
30 
31 static int rga_job_put(struct rga_job *job)
32 {
33 	return kref_put(&job->refcount, rga_job_kref_release);
34 }
35 
36 static void rga_job_get(struct rga_job *job)
37 {
38 	kref_get(&job->refcount);
39 }
40 
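/*
 * Drop the caller's reference on the job; with TIME debugging enabled,
 * log how long the job has lived since rga_job_alloc().
 */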
41 static int rga_job_cleanup(struct rga_job *job)
42 {
43 	if (DEBUGGER_EN(TIME))
44 		pr_err("(pid:%d) job clean use time = %lld\n", job->pid,
45 			ktime_us_delta(ktime_get(), job->timestamp));
46 
47 	rga_job_put(job);
48 
49 	return 0;
50 }
51 
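/*
 * For handle-based jobs, check whether every buffer referenced by the
 * request was allocated below 4G. If any is not, mark the job
 * RGA_JOB_UNSUPPORT_RGA_MMU, presumably so that core assignment avoids
 * schedulers whose MMU cannot address memory above 4G.
 */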
52 static int rga_job_judgment_support_core(struct rga_job *job)
53 {
54 	int ret = 0;
55 	uint32_t mm_flag;
56 	struct rga_req *req;
57 	struct rga_mm *mm;
58 
59 	req = &job->rga_command_base;
60 	mm = rga_drvdata->mm;
61 	if (mm == NULL) {
62 		pr_err("rga mm is null!\n");
63 		return -EFAULT;
64 	}
65 
66 	mutex_lock(&mm->lock);
67 
68 	if (likely(req->src.yrgb_addr > 0)) {
69 		ret = rga_mm_lookup_flag(mm, req->src.yrgb_addr);
70 		if (ret < 0)
71 			goto out_finish;
72 		else
73 			mm_flag = (uint32_t)ret;
74 
75 		if (~mm_flag & RGA_MEM_UNDER_4G) {
76 			job->flags |= RGA_JOB_UNSUPPORT_RGA_MMU;
77 			goto out_finish;
78 		}
79 	}
80 
81 	if (likely(req->dst.yrgb_addr > 0)) {
82 		ret = rga_mm_lookup_flag(mm, req->dst.yrgb_addr);
83 		if (ret < 0)
84 			goto out_finish;
85 		else
86 			mm_flag = (uint32_t)ret;
87 
88 		if (~mm_flag & RGA_MEM_UNDER_4G) {
89 			job->flags |= RGA_JOB_UNSUPPORT_RGA_MMU;
90 			goto out_finish;
91 		}
92 	}
93 
94 	if (req->pat.yrgb_addr > 0) {
95 		ret = rga_mm_lookup_flag(mm, req->pat.yrgb_addr);
96 		if (ret < 0)
97 			goto out_finish;
98 		else
99 			mm_flag = (uint32_t)ret;
100 
101 		if (~mm_flag & RGA_MEM_UNDER_4G) {
102 			job->flags |= RGA_JOB_UNSUPPORT_RGA_MMU;
103 			goto out_finish;
104 		}
105 	}
106 
107 out_finish:
108 	mutex_unlock(&mm->lock);
109 
110 	return ret;
111 }
112 
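/*
 * Allocate a job from a zeroed DMA32 page, initialize its refcount and
 * timestamp, copy in the user request, clamp the priority to
 * RGA_SCHED_PRIORITY_MAX, and pre-check MMU support for handle-based
 * requests.
 */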
113 static struct rga_job *rga_job_alloc(struct rga_req *rga_command_base)
114 {
115 	struct rga_job *job = NULL;
116 
117 	job = (struct rga_job *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
118 	if (!job)
119 		return NULL;
120 
121 	INIT_LIST_HEAD(&job->head);
122 	kref_init(&job->refcount);
123 
124 	job->timestamp = ktime_get();
125 	job->pid = current->pid;
126 
127 	job->rga_command_base = *rga_command_base;
128 
129 	if (rga_command_base->priority > 0) {
130 		if (rga_command_base->priority > RGA_SCHED_PRIORITY_MAX)
131 			job->priority = RGA_SCHED_PRIORITY_MAX;
132 		else
133 			job->priority = rga_command_base->priority;
134 	}
135 
136 	if (job->rga_command_base.handle_flag & 1) {
137 		job->flags |= RGA_JOB_USE_HANDLE;
138 
139 		rga_job_judgment_support_core(job);
140 	}
141 
142 	return job;
143 }
144 
145 static void rga_job_dump_info(struct rga_job *job)
146 {
147 	pr_info("job: request_id = %d, priority = %d, core = %d\n",
148 		job->request_id, job->priority, job->core);
149 }
150 
151 void rga_job_scheduler_dump_info(struct rga_scheduler_t *scheduler)
152 {
153 	struct rga_job *job_pos;
154 
155 	lockdep_assert_held(&scheduler->irq_lock);
156 
157 	pr_info("===============================================================\n");
158 	pr_info("%s core = %d job_count = %d status = %d\n",
159 		dev_driver_string(scheduler->dev),
160 		scheduler->core, scheduler->job_count, scheduler->status);
161 
162 	if (scheduler->running_job)
163 		rga_job_dump_info(scheduler->running_job);
164 
165 	list_for_each_entry(job_pos, &scheduler->todo_list, head) {
166 		rga_job_dump_info(job_pos);
167 	}
168 
169 	pr_info("===============================================================\n");
170 }
171 
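/*
 * Power up the scheduler and program the job's registers through
 * ops->set_reg(); on success the job is marked RUNNING, on failure the
 * power reference taken here is dropped again.
 */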
172 static int rga_job_run(struct rga_job *job, struct rga_scheduler_t *scheduler)
173 {
174 	int ret = 0;
175 
176 	/* enable power */
177 	ret = rga_power_enable(scheduler);
178 	if (ret < 0) {
179 		pr_err("power enable failed");
180 		return ret;
181 	}
182 
183 	ret = scheduler->ops->set_reg(job, scheduler);
184 	if (ret < 0) {
185 		pr_err("set reg failed");
186 		rga_power_disable(scheduler);
187 		return ret;
188 	}
189 
190 	set_bit(RGA_JOB_STATE_RUNNING, &job->state);
191 
192 	/* for debug */
193 	if (DEBUGGER_EN(MSG))
194 		rga_job_dump_info(job);
195 
196 	return ret;
197 }
198 
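/*
 * If the scheduler is idle, dequeue the job at the head of the todo list
 * and start it. When a job fails before the hardware starts, its request
 * is signalled with the error and the next queued job is tried.
 */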
199 void rga_job_next(struct rga_scheduler_t *scheduler)
200 {
201 	int ret;
202 	struct rga_job *job = NULL;
203 	unsigned long flags;
204 
205 next_job:
206 	spin_lock_irqsave(&scheduler->irq_lock, flags);
207 
208 	if (scheduler->running_job ||
209 		list_empty(&scheduler->todo_list)) {
210 		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
211 		return;
212 	}
213 
214 	job = list_first_entry(&scheduler->todo_list, struct rga_job, head);
215 
216 	list_del_init(&job->head);
217 
218 	scheduler->job_count--;
219 
220 	scheduler->running_job = job;
221 	set_bit(RGA_JOB_STATE_PREPARE, &job->state);
222 	rga_job_get(job);
223 
224 	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
225 
226 	ret = rga_job_run(job, scheduler);
227 	/* If an error occurred before the hardware started */
228 	if (ret < 0) {
229 		pr_err("rga_job_run failed before hw start, %s(%d)\n", __func__, __LINE__);
230 
231 		spin_lock_irqsave(&scheduler->irq_lock, flags);
232 
233 		scheduler->running_job = NULL;
234 		rga_job_put(job);
235 
236 		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
237 
238 		job->ret = ret;
239 		rga_request_release_signal(scheduler, job);
240 
241 		goto next_job;
242 	}
243 
244 	rga_job_put(job);
245 }
246 
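/*
 * Detach the finished job from the scheduler (presumably called from the
 * interrupt/completion path elsewhere in the driver), account its busy
 * time, optionally read back registers and dump debug data, then unmap
 * the job's memory and hand it back to the caller.
 */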
247 struct rga_job *rga_job_done(struct rga_scheduler_t *scheduler)
248 {
249 	struct rga_job *job;
250 	unsigned long flags;
251 	ktime_t now = ktime_get();
252 
253 	spin_lock_irqsave(&scheduler->irq_lock, flags);
254 
255 	job = scheduler->running_job;
256 	if (job == NULL) {
257 		pr_err("core[0x%x] running job has already been cleaned up.\n", scheduler->core);
258 
259 		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
260 		return NULL;
261 	}
262 	scheduler->running_job = NULL;
263 
264 	scheduler->timer.busy_time += ktime_us_delta(now, job->hw_recoder_time);
265 	set_bit(RGA_JOB_STATE_DONE, &job->state);
266 
267 	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
268 
269 	if (scheduler->ops->read_back_reg)
270 		scheduler->ops->read_back_reg(job, scheduler);
271 
272 	if (DEBUGGER_EN(DUMP_IMAGE))
273 		rga_dump_job_image(job);
274 
275 	if (DEBUGGER_EN(TIME)) {
276 		pr_info("hw use time = %lld\n", ktime_us_delta(now, job->hw_running_time));
277 		pr_info("(pid:%d) job done use time = %lld\n", job->pid,
278 			ktime_us_delta(now, job->timestamp));
279 	}
280 
281 	rga_mm_unmap_job_info(job);
282 
283 	return job;
284 }
285 
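/*
 * If the currently running job has exceeded RGA_JOB_TIMEOUT_DELAY, abort
 * the scheduler with a soft reset, unmap the job, signal its request with
 * -EBUSY and drop the power reference.
 */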
286 static void rga_job_scheduler_timeout_clean(struct rga_scheduler_t *scheduler)
287 {
288 	unsigned long flags;
289 	struct rga_job *job = NULL;
290 
291 	spin_lock_irqsave(&scheduler->irq_lock, flags);
292 
293 	if (scheduler->running_job == NULL || scheduler->running_job->hw_running_time == 0) {
294 		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
295 		return;
296 	}
297 
298 	job = scheduler->running_job;
299 	if (ktime_ms_delta(ktime_get(), job->hw_running_time) >= RGA_JOB_TIMEOUT_DELAY) {
300 		scheduler->running_job = NULL;
301 		scheduler->status = RGA_SCHEDULER_ABORT;
302 		scheduler->ops->soft_reset(scheduler);
303 
304 		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
305 
306 		rga_mm_unmap_job_info(job);
307 
308 		job->ret = -EBUSY;
309 		rga_request_release_signal(scheduler, job);
310 
311 		rga_power_disable(scheduler);
312 	} else {
313 		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
314 	}
315 }
316 
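/*
 * Queue the job on its scheduler's todo list. Default-priority jobs are
 * simply appended; otherwise the job is placed according to its priority
 * and the entries behind the insertion point get a priority boost,
 * presumably to keep long-waiting jobs from starving.
 */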
317 static void rga_job_insert_todo_list(struct rga_job *job)
318 {
319 	bool first_match = false;
320 	unsigned long flags;
321 	struct rga_job *job_pos;
322 	struct rga_scheduler_t *scheduler = job->scheduler;
323 
324 	spin_lock_irqsave(&scheduler->irq_lock, flags);
325 
326 	/* priority policy set by userspace */
327 	if (list_empty(&scheduler->todo_list)
328 		|| (job->priority == RGA_SCHED_PRIORITY_DEFAULT)) {
329 		list_add_tail(&job->head, &scheduler->todo_list);
330 	} else {
331 		list_for_each_entry(job_pos, &scheduler->todo_list, head) {
332 			if (job->priority > job_pos->priority &&
333 					(!first_match)) {
334 				list_add(&job->head, &job_pos->head);
335 				first_match = true;
336 			}
337 
338 			/*
339 			 * Increase the priority of subsequent tasks
340 			 * after inserting into the list
341 			 */
342 			if (first_match)
343 				job_pos->priority++;
344 		}
345 
346 		if (!first_match)
347 			list_add_tail(&job->head, &scheduler->todo_list);
348 	}
349 
350 	scheduler->job_count++;
351 	set_bit(RGA_JOB_STATE_PENDING, &job->state);
352 
353 	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
354 }
355 
356 static struct rga_scheduler_t *rga_job_schedule(struct rga_job *job)
357 {
358 	int i;
359 	struct rga_scheduler_t *scheduler = NULL;
360 
361 	for (i = 0; i < rga_drvdata->num_of_scheduler; i++) {
362 		scheduler = rga_drvdata->scheduler[i];
363 		rga_job_scheduler_timeout_clean(scheduler);
364 	}
365 
366 	if (rga_drvdata->num_of_scheduler > 1) {
367 		job->core = rga_job_assign(job);
368 		if (job->core <= 0) {
369 			pr_err("job assign failed");
370 			job->ret = -EINVAL;
371 			return NULL;
372 		}
373 	} else {
374 		job->core = rga_drvdata->scheduler[0]->core;
375 		job->scheduler = rga_drvdata->scheduler[0];
376 	}
377 
378 	scheduler = job->scheduler;
379 	if (scheduler == NULL) {
380 		pr_err("failed to get scheduler, %s(%d)\n", __func__, __LINE__);
381 		job->ret = -EFAULT;
382 		return NULL;
383 	}
384 
385 	return scheduler;
386 }
387 
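/*
 * Build an rga_job from one rga_req: pick a scheduler, map the job's
 * buffers (with the power domain held), let the scheduler build its
 * register list, queue the job and kick rga_job_next(). On any error the
 * request is signalled with the job's error code.
 */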
388 struct rga_job *rga_job_commit(struct rga_req *rga_command_base, struct rga_request *request)
389 {
390 	int ret;
391 	struct rga_job *job = NULL;
392 	struct rga_scheduler_t *scheduler = NULL;
393 
394 	job = rga_job_alloc(rga_command_base);
395 	if (!job) {
396 		pr_err("failed to alloc rga job!\n");
397 		return ERR_PTR(-ENOMEM);
398 	}
399 
400 	job->use_batch_mode = request->use_batch_mode;
401 	job->request_id = request->id;
402 	job->session = request->session;
403 	job->mm = request->current_mm;
404 
405 	scheduler = rga_job_schedule(job);
406 	if (scheduler == NULL) {
407 		pr_err("failed to get scheduler, %s(%d)\n", __func__, __LINE__);
408 		goto err_free_job;
409 	}
410 
411 	/* Memory mapping needs to keep the power domain (pd) enabled. */
412 	if (rga_power_enable(scheduler) < 0) {
413 		pr_err("power enable failed");
414 		job->ret = -EFAULT;
415 		goto err_free_job;
416 	}
417 
418 	ret = rga_mm_map_job_info(job);
419 	if (ret < 0) {
420 		pr_err("%s: failed to map job info\n", __func__);
421 		job->ret = ret;
422 		goto err_power_disable;
423 	}
424 
425 	ret = scheduler->ops->init_reg(job);
426 	if (ret < 0) {
427 		pr_err("%s: init reg failed", __func__);
428 		job->ret = ret;
429 		goto err_unmap_job_info;
430 	}
431 
432 	rga_job_insert_todo_list(job);
433 
434 	rga_job_next(scheduler);
435 
436 	rga_power_disable(scheduler);
437 
438 	return job;
439 
440 err_unmap_job_info:
441 	rga_mm_unmap_job_info(job);
442 
443 err_power_disable:
444 	rga_power_disable(scheduler);
445 
446 err_free_job:
447 	ret = job->ret;
448 	rga_request_release_signal(scheduler, job);
449 
450 	return ERR_PTR(ret);
451 }
452 
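/*
 * Returns true when any image in the request has its per-image MMU bit
 * set and carries a uv_addr, i.e. it presumably references user virtual
 * memory and the submitter's mm must be pinned until mapping is done.
 */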
453 static bool rga_is_need_current_mm(struct rga_req *req)
454 {
455 	int mmu_flag;
456 	struct rga_img_info_t *src0 = NULL;
457 	struct rga_img_info_t *src1 = NULL;
458 	struct rga_img_info_t *dst = NULL;
459 	struct rga_img_info_t *els = NULL;
460 
461 	src0 = &req->src;
462 	dst = &req->dst;
463 	if (req->render_mode != UPDATE_PALETTE_TABLE_MODE)
464 		src1 = &req->pat;
465 	else
466 		els = &req->pat;
467 
468 	if (likely(src0 != NULL)) {
469 		mmu_flag = ((req->mmu_info.mmu_flag >> 8) & 1);
470 		if (mmu_flag && src0->uv_addr)
471 			return true;
472 	}
473 
474 	if (likely(dst != NULL)) {
475 		mmu_flag = ((req->mmu_info.mmu_flag >> 10) & 1);
476 		if (mmu_flag && dst->uv_addr)
477 			return true;
478 	}
479 
480 	if (src1 != NULL) {
481 		mmu_flag = ((req->mmu_info.mmu_flag >> 9) & 1);
482 		if (mmu_flag && src1->uv_addr)
483 			return true;
484 	}
485 
486 	if (els != NULL) {
487 		mmu_flag = ((req->mmu_info.mmu_flag >> 11) & 1);
488 		if (mmu_flag && els->uv_addr)
489 			return true;
490 	}
491 
492 	return false;
493 }
494 
495 static int rga_request_get_current_mm(struct rga_request *request)
496 {
497 	int i;
498 
499 	for (i = 0; i < request->task_count; i++) {
500 		if (rga_is_need_current_mm(&(request->task_list[i]))) {
501 			mmgrab(current->mm);
502 			mmget(current->mm);
503 			request->current_mm = current->mm;
504 
505 			break;
506 		}
507 	}
508 
509 	return 0;
510 }
511 
512 static void rga_request_put_current_mm(struct rga_request *request)
513 {
514 	if (request->current_mm == NULL)
515 		return;
516 
517 	mmput(request->current_mm);
518 	mmdrop(request->current_mm);
519 	request->current_mm = NULL;
520 }
521 
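/*
 * Take the acquire fence referenced by acquire_fence_fd (closing the fd),
 * and either report that it is already signaled (return > 0) or register
 * cb_func on it (return 0). On success a reference on the request is held
 * until the callback has run.
 */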
522 static int rga_request_add_acquire_fence_callback(int acquire_fence_fd,
523 						  struct rga_request *request,
524 						  dma_fence_func_t cb_func)
525 {
526 	int ret;
527 	struct dma_fence *acquire_fence = NULL;
528 	struct rga_pending_request_manager *request_manager = rga_drvdata->pend_request_manager;
529 
530 	if (DEBUGGER_EN(MSG))
531 		pr_info("acquire_fence_fd = %d", acquire_fence_fd);
532 
533 	acquire_fence = rga_get_dma_fence_from_fd(acquire_fence_fd);
534 	if (IS_ERR_OR_NULL(acquire_fence)) {
535 		pr_err("%s: failed to get acquire dma_fence from[%d]\n",
536 		       __func__, acquire_fence_fd);
537 		return -EINVAL;
538 	}
539 	/* close acquire fence fd */
540 #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
541 	close_fd(acquire_fence_fd);
542 #else
543 	ksys_close(acquire_fence_fd);
544 #endif
545 
546 	ret = rga_dma_fence_get_status(acquire_fence);
547 	if (ret < 0) {
548 		pr_err("%s: Current acquire fence unexpectedly has error status before signal\n",
549 		       __func__);
550 		return ret;
551 	} else if (ret > 0) {
552 		/* has been signaled */
553 		return ret;
554 	}
555 
556 	/*
557 	 * Ensure that the request will not be freed early when
558 	 * the callback is called.
559 	 */
560 	mutex_lock(&request_manager->lock);
561 	rga_request_get(request);
562 	mutex_unlock(&request_manager->lock);
563 
564 	ret = rga_dma_fence_add_callback(acquire_fence, cb_func, (void *)request);
565 	if (ret < 0) {
566 		if (ret != -ENOENT)
567 			pr_err("%s: failed to add fence callback\n", __func__);
568 
569 		mutex_lock(&request_manager->lock);
570 		rga_request_put(request);
571 		mutex_unlock(&request_manager->lock);
572 		return ret;
573 	}
574 
575 	return 0;
576 }
577 
578 int rga_request_check(struct rga_user_request *req)
579 {
580 	if (req->id <= 0) {
581 		pr_err("user request id[%d] is invalid", req->id);
582 		return -EINVAL;
583 	}
584 
585 	if (req->task_num <= 0) {
586 		pr_err("invalid user request!\n");
587 		return -EINVAL;
588 	}
589 
590 	if (req->task_ptr == 0) {
591 		pr_err("task_ptr is NULL!\n");
592 		return -EINVAL;
593 	}
594 
595 	if (req->task_num > RGA_TASK_NUM_MAX) {
596 		pr_err("Only supports running %d tasks, now %d\n",
597 		       RGA_TASK_NUM_MAX, req->task_num);
598 		return -EFBIG;
599 	}
600 
601 	return 0;
602 }
603 
604 struct rga_request *rga_request_lookup(struct rga_pending_request_manager *manager, uint32_t id)
605 {
606 	struct rga_request *request = NULL;
607 
608 	WARN_ON(!mutex_is_locked(&manager->lock));
609 
610 	request = idr_find(&manager->request_idr, id);
611 
612 	return request;
613 }
614 
615 static int rga_request_scheduler_job_abort(struct rga_request *request)
616 {
617 	int i;
618 	unsigned long flags;
619 	enum rga_scheduler_status scheduler_status;
620 	int running_abort_count = 0, todo_abort_count = 0;
621 	struct rga_scheduler_t *scheduler = NULL;
622 	struct rga_job *job, *job_q;
623 	LIST_HEAD(list_to_free);
624 
625 	for (i = 0; i < rga_drvdata->num_of_scheduler; i++) {
626 		scheduler = rga_drvdata->scheduler[i];
627 		spin_lock_irqsave(&scheduler->irq_lock, flags);
628 
629 		list_for_each_entry_safe(job, job_q, &scheduler->todo_list, head) {
630 			if (request->id == job->request_id) {
631 				list_move(&job->head, &list_to_free);
632 				scheduler->job_count--;
633 
634 				todo_abort_count++;
635 			}
636 		}
637 
638 		job = NULL;
639 		if (scheduler->running_job) {
640 			if (request->id == scheduler->running_job->request_id) {
641 				job = scheduler->running_job;
642 				scheduler_status = scheduler->status;
643 				scheduler->running_job = NULL;
644 				scheduler->status = RGA_SCHEDULER_ABORT;
645 				list_add_tail(&job->head, &list_to_free);
646 
647 				if (job->hw_running_time != 0) {
648 					scheduler->timer.busy_time +=
649 						ktime_us_delta(ktime_get(), job->hw_recoder_time);
650 					scheduler->ops->soft_reset(scheduler);
651 				}
652 
653 				pr_err("reset core[%d] by request[%d] abort",
654 				       scheduler->core, request->id);
655 				running_abort_count++;
656 			}
657 		}
658 
659 		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
660 
661 		if (job && scheduler_status == RGA_SCHEDULER_WORKING)
662 			rga_power_disable(scheduler);
663 	}
664 
665 	/* Clean up the jobs in the todo list that need to be freed. */
666 	list_for_each_entry_safe(job, job_q, &list_to_free, head) {
667 		rga_mm_unmap_job_info(job);
668 
669 		job->ret = -EBUSY;
670 		rga_job_cleanup(job);
671 	}
672 
673 	/* This means it has been cleaned up. */
674 	if (running_abort_count + todo_abort_count == 0)
675 		return 1;
676 
677 	pr_err("request[%d] abort! finished %d failed %d running_abort %d todo_abort %d\n",
678 	       request->id, request->finished_task_count, request->failed_task_count,
679 	       running_abort_count, todo_abort_count);
680 
681 	return 0;
682 }
683 
684 static void rga_request_release_abort(struct rga_request *request, int err_code)
685 {
686 	unsigned long flags;
687 	struct rga_pending_request_manager *request_manager = rga_drvdata->pend_request_manager;
688 
689 	if (rga_request_scheduler_job_abort(request) > 0)
690 		return;
691 
692 	spin_lock_irqsave(&request->lock, flags);
693 
694 	if (request->is_done) {
695 		spin_unlock_irqrestore(&request->lock, flags);
696 		return;
697 	}
698 
699 	request->is_running = false;
700 	request->is_done = false;
701 
702 	rga_request_put_current_mm(request);
703 
704 	spin_unlock_irqrestore(&request->lock, flags);
705 
706 	rga_dma_fence_signal(request->release_fence, err_code);
707 
708 	mutex_lock(&request_manager->lock);
709 	/* current submit request put */
710 	rga_request_put(request);
711 	mutex_unlock(&request_manager->lock);
712 }
713 
714 void rga_request_session_destroy_abort(struct rga_session *session)
715 {
716 	int request_id;
717 	struct rga_request *request;
718 	struct rga_pending_request_manager *request_manager;
719 
720 	request_manager = rga_drvdata->pend_request_manager;
721 	if (request_manager == NULL) {
722 		pr_err("rga_pending_request_manager is null!\n");
723 		return;
724 	}
725 
726 	mutex_lock(&request_manager->lock);
727 
728 	idr_for_each_entry(&request_manager->request_idr, request, request_id) {
729 		if (session == request->session) {
730 			pr_err("[tgid:%d pid:%d] destroy request[%d] when the user exits",
731 			       session->tgid, current->pid, request->id);
732 			rga_request_put(request);
733 		}
734 	}
735 
736 	mutex_unlock(&request_manager->lock);
737 }
738 
739 static int rga_request_timeout_query_state(struct rga_request *request)
740 {
741 	int i;
742 	unsigned long flags;
743 	struct rga_scheduler_t *scheduler = NULL;
744 	struct rga_job *job = NULL;
745 
746 	for (i = 0; i < rga_drvdata->num_of_scheduler; i++) {
747 		scheduler = rga_drvdata->scheduler[i];
748 
749 		spin_lock_irqsave(&scheduler->irq_lock, flags);
750 
751 		if (scheduler->running_job) {
752 			job = scheduler->running_job;
753 			if (request->id == job->request_id) {
754 				if (test_bit(RGA_JOB_STATE_DONE, &job->state) &&
755 				    test_bit(RGA_JOB_STATE_FINISH, &job->state)) {
756 					spin_unlock_irqrestore(&scheduler->irq_lock, flags);
757 					return request->ret;
758 				} else if (!test_bit(RGA_JOB_STATE_DONE, &job->state) &&
759 					   test_bit(RGA_JOB_STATE_FINISH, &job->state)) {
760 					spin_unlock_irqrestore(&scheduler->irq_lock, flags);
761 					pr_err("request[%d] hardware has finished, but the software has timed out!\n",
762 					       request->id);
763 					return -EBUSY;
764 				} else if (!test_bit(RGA_JOB_STATE_DONE, &job->state) &&
765 					   !test_bit(RGA_JOB_STATE_FINISH, &job->state)) {
766 					spin_unlock_irqrestore(&scheduler->irq_lock, flags);
767 					pr_err("request[%d] hardware has timed out.\n", request->id);
768 					return -EBUSY;
769 				}
770 			}
771 		}
772 
773 		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
774 	}
775 
776 	return request->ret;
777 }
778 
779 static int rga_request_wait(struct rga_request *request)
780 {
781 	int left_time;
782 	int ret;
783 
784 	left_time = wait_event_timeout(request->finished_wq, request->is_done,
785 				       RGA_JOB_TIMEOUT_DELAY * request->task_count);
786 
787 	switch (left_time) {
788 	case 0:
789 		ret = rga_request_timeout_query_state(request);
790 		goto err_request_abort;
791 	case -ERESTARTSYS:
792 		ret = -ERESTARTSYS;
793 		goto err_request_abort;
794 	default:
795 		ret = request->ret;
796 		break;
797 	}
798 
799 	return ret;
800 
801 err_request_abort:
802 	rga_request_release_abort(request, ret);
803 
804 	return ret;
805 }
806 
807 int rga_request_commit(struct rga_request *request)
808 {
809 	int ret;
810 	int i = 0;
811 	struct rga_job *job;
812 
813 	for (i = 0; i < request->task_count; i++) {
814 		job = rga_job_commit(&(request->task_list[i]), request);
815 		if (IS_ERR(job)) {
816 			pr_err("request[%d] task[%d] job_commit failed.\n", request->id, i);
817 			rga_request_release_abort(request, PTR_ERR(job));
818 
819 			return PTR_ERR(job);
820 		}
821 	}
822 
823 	if (request->sync_mode == RGA_BLIT_SYNC) {
824 		ret = rga_request_wait(request);
825 		if (ret < 0)
826 			return ret;
827 	}
828 
829 	return 0;
830 }
831 
832 static void rga_request_acquire_fence_signaled_cb(struct dma_fence *fence,
833 						  struct dma_fence_cb *_waiter)
834 {
835 	struct rga_fence_waiter *waiter = (struct rga_fence_waiter *)_waiter;
836 	struct rga_request *request = (struct rga_request *)waiter->private;
837 	struct rga_pending_request_manager *request_manager = rga_drvdata->pend_request_manager;
838 
839 	if (rga_request_commit(request))
840 		pr_err("rga request[%d] commit failed!\n", request->id);
841 
842 	mutex_lock(&request_manager->lock);
843 	rga_request_put(request);
844 	mutex_unlock(&request_manager->lock);
845 
846 	kfree(waiter);
847 }
848 
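/*
 * Per-job completion: account the job as finished or failed on its
 * request and clean it up. Once every task of the request is accounted
 * for, mark the request done, release the pinned mm, signal the release
 * fence and wake anyone sleeping in rga_request_wait().
 */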
849 int rga_request_release_signal(struct rga_scheduler_t *scheduler, struct rga_job *job)
850 {
851 	struct rga_pending_request_manager *request_manager;
852 	struct rga_request *request;
853 	int finished_count, failed_count;
854 	unsigned long flags;
855 
856 	request_manager = rga_drvdata->pend_request_manager;
857 	if (request_manager == NULL) {
858 		pr_err("rga_pending_request_manager is null!\n");
859 		return -EFAULT;
860 	}
861 
862 	mutex_lock(&request_manager->lock);
863 
864 	request = rga_request_lookup(request_manager, job->request_id);
865 	if (IS_ERR_OR_NULL(request)) {
866 		pr_err("can not find internal request from id[%d]", job->request_id);
867 		mutex_unlock(&request_manager->lock);
868 		return -EINVAL;
869 	}
870 
871 	rga_request_get(request);
872 	mutex_unlock(&request_manager->lock);
873 
874 	spin_lock_irqsave(&request->lock, flags);
875 
876 	if (job->ret < 0) {
877 		request->failed_task_count++;
878 		request->ret = job->ret;
879 	} else {
880 		request->finished_task_count++;
881 	}
882 
883 	failed_count = request->failed_task_count;
884 	finished_count = request->finished_task_count;
885 
886 	spin_unlock_irqrestore(&request->lock, flags);
887 
888 	rga_job_cleanup(job);
889 
890 	if ((failed_count + finished_count) >= request->task_count) {
891 		spin_lock_irqsave(&request->lock, flags);
892 
893 		request->is_running = false;
894 		request->is_done = true;
895 
896 		rga_request_put_current_mm(request);
897 
898 		spin_unlock_irqrestore(&request->lock, flags);
899 
900 		rga_dma_fence_signal(request->release_fence, request->ret);
901 
902 		wake_up(&request->finished_wq);
903 
904 		if (DEBUGGER_EN(MSG))
905 			pr_info("request[%d] finished %d failed %d\n",
906 				request->id, finished_count, failed_count);
907 
908 		/* current submit request put */
909 		mutex_lock(&request_manager->lock);
910 		rga_request_put(request);
911 		mutex_unlock(&request_manager->lock);
912 	}
913 
914 	mutex_lock(&request_manager->lock);
915 	rga_request_put(request);
916 	mutex_unlock(&request_manager->lock);
917 
918 	return 0;
919 }
920 
921 struct rga_request *rga_request_config(struct rga_user_request *user_request)
922 {
923 	int ret;
924 	unsigned long flags;
925 	struct rga_pending_request_manager *request_manager;
926 	struct rga_request *request;
927 	struct rga_req *task_list;
928 
929 	request_manager = rga_drvdata->pend_request_manager;
930 	if (request_manager == NULL) {
931 		pr_err("rga_pending_request_manager is null!\n");
932 		return ERR_PTR(-EFAULT);
933 	}
934 
935 	mutex_lock(&request_manager->lock);
936 
937 	request = rga_request_lookup(request_manager, user_request->id);
938 	if (IS_ERR_OR_NULL(request)) {
939 		pr_err("can not find request from id[%d]", user_request->id);
940 		mutex_unlock(&request_manager->lock);
941 		return ERR_PTR(-EINVAL);
942 	}
943 
944 	rga_request_get(request);
945 	mutex_unlock(&request_manager->lock);
946 
947 	task_list = kmalloc_array(user_request->task_num, sizeof(struct rga_req), GFP_KERNEL);
948 	if (task_list == NULL) {
949 		pr_err("task_req list alloc error!\n");
950 		ret = -ENOMEM;
951 		goto err_put_request;
952 	}
953 
954 	if (unlikely(copy_from_user(task_list, u64_to_user_ptr(user_request->task_ptr),
955 				    sizeof(struct rga_req) * user_request->task_num))) {
956 		pr_err("rga_user_request task list copy_from_user failed\n");
957 		ret = -EFAULT;
958 		goto err_free_task_list;
959 	}
960 
961 	spin_lock_irqsave(&request->lock, flags);
962 
963 	request->use_batch_mode = true;
964 	request->task_list = task_list;
965 	request->task_count = user_request->task_num;
966 	request->sync_mode = user_request->sync_mode;
967 	request->mpi_config_flags = user_request->mpi_config_flags;
968 	request->acquire_fence_fd = user_request->acquire_fence_fd;
969 
970 	spin_unlock_irqrestore(&request->lock, flags);
971 
972 	return request;
973 
974 err_free_task_list:
975 	kfree(task_list);
976 err_put_request:
977 	mutex_lock(&request_manager->lock);
978 	rga_request_put(request);
979 	mutex_unlock(&request_manager->lock);
980 
981 	return ERR_PTR(ret);
982 }
983 
984 struct rga_request *rga_request_kernel_config(struct rga_user_request *user_request)
985 {
986 	int ret = 0;
987 	unsigned long flags;
988 	struct rga_pending_request_manager *request_manager;
989 	struct rga_request *request;
990 	struct rga_req *task_list;
991 
992 	request_manager = rga_drvdata->pend_request_manager;
993 	if (request_manager == NULL) {
994 		pr_err("rga_pending_request_manager is null!\n");
995 		return ERR_PTR(-EFAULT);
996 	}
997 
998 	mutex_lock(&request_manager->lock);
999 
1000 	request = rga_request_lookup(request_manager, user_request->id);
1001 	if (IS_ERR_OR_NULL(request)) {
1002 		pr_err("can not find request from id[%d]", user_request->id);
1003 		mutex_unlock(&request_manager->lock);
1004 		return ERR_PTR(-EINVAL);
1005 	}
1006 
1007 	rga_request_get(request);
1008 	mutex_unlock(&request_manager->lock);
1009 
1010 	task_list = kmalloc_array(user_request->task_num, sizeof(struct rga_req), GFP_KERNEL);
1011 	if (task_list == NULL) {
1012 		pr_err("task_req list alloc error!\n");
1013 		ret = -ENOMEM;
1014 		goto err_put_request;
1015 	}
1016 
1017 	memcpy(task_list, u64_to_user_ptr(user_request->task_ptr),
1018 	       sizeof(struct rga_req) * user_request->task_num);
1019 
1020 	spin_lock_irqsave(&request->lock, flags);
1021 
1022 	request->use_batch_mode = true;
1023 	request->task_list = task_list;
1024 	request->task_count = user_request->task_num;
1025 	request->sync_mode = user_request->sync_mode;
1026 	request->mpi_config_flags = user_request->mpi_config_flags;
1027 	request->acquire_fence_fd = user_request->acquire_fence_fd;
1028 
1029 	spin_unlock_irqrestore(&request->lock, flags);
1030 
1031 	return request;
1032 
1033 err_put_request:
1034 	mutex_lock(&request_manager->lock);
1035 	rga_request_put(request);
1036 	mutex_unlock(&request_manager->lock);
1037 
1038 	return ERR_PTR(ret);
1039 }
1040 
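/*
 * Arm the request and commit it. In synchronous mode the tasks are
 * committed (and waited for) directly; in asynchronous mode a release
 * fence is allocated first and, if an acquire fence is still pending, the
 * commit is deferred to rga_request_acquire_fence_signaled_cb(). The
 * release fence fd is exported back through request->release_fence_fd.
 */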
1041 int rga_request_submit(struct rga_request *request)
1042 {
1043 	int ret = 0;
1044 	unsigned long flags;
1045 	struct dma_fence *release_fence;
1046 
1047 	spin_lock_irqsave(&request->lock, flags);
1048 
1049 	if (request->is_running) {
1050 		spin_unlock_irqrestore(&request->lock, flags);
1051 
1052 		pr_err("can not re-config when request is running\n");
1053 		return -EFAULT;
1054 	}
1055 
1056 	if (request->task_list == NULL) {
1057 		spin_unlock_irqrestore(&request->lock, flags);
1058 
1059 		pr_err("can not find task list from id[%d]\n", request->id);
1060 		return -EINVAL;
1061 	}
1062 
1063 	/* Reset */
1064 	request->is_running = true;
1065 	request->is_done = false;
1066 	request->finished_task_count = 0;
1067 	request->failed_task_count = 0;
1068 
1069 	rga_request_get_current_mm(request);
1070 
1071 	/* Unlock after ensuring that the current request will not be resubmitted. */
1072 	spin_unlock_irqrestore(&request->lock, flags);
1073 
1074 	if (request->sync_mode == RGA_BLIT_ASYNC) {
1075 		release_fence = rga_dma_fence_alloc();
1076 		if (IS_ERR(release_fence)) {
1077 			pr_err("Can not alloc release fence!\n");
1078 			ret = PTR_ERR(release_fence);
1079 			goto error_put_current_mm;
1080 		}
1081 		request->release_fence = release_fence;
1082 
1083 		if (request->acquire_fence_fd > 0) {
1084 			ret = rga_request_add_acquire_fence_callback(
1085 				request->acquire_fence_fd, request,
1086 				rga_request_acquire_fence_signaled_cb);
1087 			if (ret == 0) {
1088 				/* acquire fence active */
1089 				goto export_release_fence_fd;
1090 			} else if (ret > 0) {
1091 				/* acquire fence has been signaled */
1092 				goto request_commit;
1093 			} else {
1094 				pr_err("Failed to add callback with acquire fence fd[%d]!\n",
1095 				       request->acquire_fence_fd);
1096 				goto err_put_release_fence;
1097 			}
1098 		}
1099 	}
1100 
1101 request_commit:
1102 	ret = rga_request_commit(request);
1103 	if (ret < 0) {
1104 		pr_err("rga request[%d] commit failed!\n", request->id);
1105 		goto err_put_release_fence;
1106 	}
1107 
1108 export_release_fence_fd:
1109 	if (request->release_fence != NULL) {
1110 		ret = rga_dma_fence_get_fd(request->release_fence);
1111 		if (ret < 0) {
1112 			pr_err("Failed to alloc release fence fd!\n");
1113 			rga_request_release_abort(request, ret);
1114 			return ret;
1115 		}
1116 
1117 		request->release_fence_fd = ret;
1118 	}
1119 
1120 	return 0;
1121 
1122 err_put_release_fence:
1123 	if (request->release_fence != NULL) {
1124 		rga_dma_fence_put(request->release_fence);
1125 		request->release_fence = NULL;
1126 	}
1127 
1128 error_put_current_mm:
1129 	spin_lock_irqsave(&request->lock, flags);
1130 
1131 	rga_request_put_current_mm(request);
1132 	request->is_running = false;
1133 
1134 	spin_unlock_irqrestore(&request->lock, flags);
1135 
1136 	return ret;
1137 }
1138 
1139 int rga_request_mpi_submit(struct rga_req *req, struct rga_request *request)
1140 {
1141 	int ret = 0;
1142 	struct rga_job *job = NULL;
1143 	unsigned long flags;
1144 
1145 	if (request->sync_mode == RGA_BLIT_ASYNC) {
1146 		pr_err("mpi does not support async mode!\n");
1147 		return -EINVAL;
1148 	}
1149 
1150 	spin_lock_irqsave(&request->lock, flags);
1151 
1152 	if (request->is_running) {
1153 		pr_err("can not re-config when request is running");
1154 		spin_unlock_irqrestore(&request->lock, flags);
1155 		return -EFAULT;
1156 	}
1157 
1158 	if (request->task_list == NULL) {
1159 		pr_err("can not find task list from id[%d]", request->id);
1160 		spin_unlock_irqrestore(&request->lock, flags);
1161 		return -EINVAL;
1162 	}
1163 
1164 	/* Reset */
1165 	request->is_running = true;
1166 	request->is_done = false;
1167 	request->finished_task_count = 0;
1168 	request->failed_task_count = 0;
1169 
1170 	spin_unlock_irqrestore(&request->lock, flags);
1171 
1172 	job = rga_job_commit(req, request);
1173 	if (IS_ERR_OR_NULL(job)) {
1174 		pr_err("failed to commit job!\n");
1175 		return job ? PTR_ERR(job) : -EFAULT;
1176 	}
1177 
1178 	ret = rga_request_wait(request);
1179 	if (ret < 0)
1180 		return ret;
1181 
1182 	return 0;
1183 }
1184 
1185 int rga_request_free(struct rga_request *request)
1186 {
1187 	struct rga_pending_request_manager *request_manager;
1188 	struct rga_req *task_list;
1189 	unsigned long flags;
1190 
1191 	request_manager = rga_drvdata->pend_request_manager;
1192 	if (request_manager == NULL) {
1193 		pr_err("rga_pending_request_manager is null!\n");
1194 		return -EFAULT;
1195 	}
1196 
1197 	WARN_ON(!mutex_is_locked(&request_manager->lock));
1198 
1199 	if (IS_ERR_OR_NULL(request)) {
1200 		pr_err("request already freed");
1201 		return -EFAULT;
1202 	}
1203 
1204 	request_manager->request_count--;
1205 	idr_remove(&request_manager->request_idr, request->id);
1206 
1207 	spin_lock_irqsave(&request->lock, flags);
1208 
1209 	task_list = request->task_list;
1210 
1211 	spin_unlock_irqrestore(&request->lock, flags);
1212 
1213 	if (task_list != NULL)
1214 		kfree(task_list);
1215 
1216 	kfree(request);
1217 
1218 	return 0;
1219 }
1220 
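/*
 * Final teardown once the last reference is dropped: signal the release
 * fence with -EFAULT if it is still pending, release the pinned mm and
 * the fence, abort any of the request's jobs still queued or running, and
 * free the request.
 */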
1221 static void rga_request_kref_release(struct kref *ref)
1222 {
1223 	struct rga_request *request;
1224 	unsigned long flags;
1225 
1226 	request = container_of(ref, struct rga_request, refcount);
1227 
1228 	if (rga_dma_fence_get_status(request->release_fence) == 0)
1229 		rga_dma_fence_signal(request->release_fence, -EFAULT);
1230 
1231 	spin_lock_irqsave(&request->lock, flags);
1232 
1233 	rga_request_put_current_mm(request);
1234 	rga_dma_fence_put(request->release_fence);
1235 
1236 	if (!request->is_running || request->is_done) {
1237 		spin_unlock_irqrestore(&request->lock, flags);
1238 		goto free_request;
1239 	}
1240 
1241 	spin_unlock_irqrestore(&request->lock, flags);
1242 
1243 	rga_request_scheduler_job_abort(request);
1244 
1245 free_request:
1246 	rga_request_free(request);
1247 }
1248 
1249 /*
1250  * Called at driver close to release the request's id references.
1251  */
1252 static int rga_request_free_cb(int id, void *ptr, void *data)
1253 {
1254 	return rga_request_free((struct rga_request *)ptr);
1255 }
1256 
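/*
 * Allocate and initialize a request, publish it in the pending-request
 * IDR and return the resulting id to the caller.
 */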
1257 int rga_request_alloc(uint32_t flags, struct rga_session *session)
1258 {
1259 	int new_id;
1260 	struct rga_pending_request_manager *request_manager;
1261 	struct rga_request *request;
1262 
1263 	request_manager = rga_drvdata->pend_request_manager;
1264 	if (request_manager == NULL) {
1265 		pr_err("rga_pending_request_manager is null!\n");
1266 		return -EFAULT;
1267 	}
1268 
1269 	request = kzalloc(sizeof(*request), GFP_KERNEL);
1270 	if (request == NULL) {
1271 		pr_err("can not kzalloc for rga_request\n");
1272 		return -ENOMEM;
1273 	}
1274 
1275 	spin_lock_init(&request->lock);
1276 	init_waitqueue_head(&request->finished_wq);
1277 
1278 	request->pid = current->pid;
1279 	request->flags = flags;
1280 	request->session = session;
1281 	kref_init(&request->refcount);
1282 
1283 	/*
1284 	 * Get the user-visible handle using idr. Preload and perform
1285 	 * allocation under the request manager's mutex.
1286 	 */
1287 	mutex_lock(&request_manager->lock);
1288 
1289 	idr_preload(GFP_KERNEL);
1290 	new_id = idr_alloc_cyclic(&request_manager->request_idr, request, 1, 0, GFP_NOWAIT);
1291 	idr_preload_end();
1292 	if (new_id < 0) {
1293 		pr_err("request alloc id failed!\n");
1294 
1295 		mutex_unlock(&request_manager->lock);
1296 		kfree(request);
1297 		return new_id;
1298 	}
1299 
1300 	request->id = new_id;
1301 	request_manager->request_count++;
1302 
1303 	mutex_unlock(&request_manager->lock);
1304 
1305 	return request->id;
1306 }
1307 
1308 int rga_request_put(struct rga_request *request)
1309 {
1310 	return kref_put(&request->refcount, rga_request_kref_release);
1311 }
1312 
1313 void rga_request_get(struct rga_request *request)
1314 {
1315 	kref_get(&request->refcount);
1316 }
1317 
1318 int rga_request_manager_init(struct rga_pending_request_manager **request_manager_session)
1319 {
1320 	struct rga_pending_request_manager *request_manager = NULL;
1321 
1322 	*request_manager_session = kzalloc(sizeof(struct rga_pending_request_manager), GFP_KERNEL);
1323 	if (*request_manager_session == NULL) {
1324 		pr_err("can not kzalloc for rga_pending_request_manager\n");
1325 		return -ENOMEM;
1326 	}
1327 
1328 	request_manager = *request_manager_session;
1329 
1330 	mutex_init(&request_manager->lock);
1331 
1332 	idr_init_base(&request_manager->request_idr, 1);
1333 
1334 	return 0;
1335 }
1336 
1337 int rga_request_manager_remove(struct rga_pending_request_manager **request_manager_session)
1338 {
1339 	struct rga_pending_request_manager *request_manager = *request_manager_session;
1340 
1341 	mutex_lock(&request_manager->lock);
1342 
1343 	idr_for_each(&request_manager->request_idr, &rga_request_free_cb, request_manager);
1344 	idr_destroy(&request_manager->request_idr);
1345 
1346 	mutex_unlock(&request_manager->lock);
1347 
1348 	kfree(*request_manager_session);
1349 
1350 	*request_manager_session = NULL;
1351 
1352 	return 0;
1353 }
1354