xref: /OK3568_Linux_fs/kernel/drivers/video/rockchip/rga3/rga_drv.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) Rockchip Electronics Co., Ltd.
4  *
5  * Author: Huang Lee <Putin.li@rock-chips.com>
6  */
7 
8 #define pr_fmt(fmt) "rga: " fmt
9 
10 #include "rga2_reg_info.h"
11 #include "rga3_reg_info.h"
12 #include "rga_dma_buf.h"
13 #include "rga_mm.h"
14 
15 #include "rga_job.h"
16 #include "rga_fence.h"
17 #include "rga_hw_config.h"
18 
19 #include "rga_iommu.h"
20 #include "rga_debugger.h"
21 #include "rga_common.h"
22 
23 struct rga_drvdata_t *rga_drvdata;
24 
25 /* set hrtimer */
26 static struct hrtimer timer;
27 static ktime_t kt;
28 
29 static struct rga_session *rga_session_init(void);
30 static int rga_session_deinit(struct rga_session *session);
31 
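/*
 * rga_mpi_set_channel_buffer() - import a dma-buf for one MPI channel.
 *
 * Wraps the dma-buf in a struct rga_external_buffer described by the
 * channel's virtual width/height/format, imports it through
 * rga_mm_import_buffer(), and stores the returned handle in
 * channel_info->yrgb_addr so later stages resolve the buffer by handle
 * rather than by address.
 */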
32 static int rga_mpi_set_channel_buffer(struct dma_buf *dma_buf,
33 				      struct rga_img_info_t *channel_info,
34 				      struct rga_session *session)
35 {
36 	struct rga_external_buffer buffer;
37 
38 	memset(&buffer, 0x0, sizeof(buffer));
39 	buffer.memory = (unsigned long)dma_buf;
40 	buffer.type = RGA_DMA_BUFFER_PTR;
41 	buffer.memory_parm.width = channel_info->vir_w;
42 	buffer.memory_parm.height = channel_info->vir_h;
43 	buffer.memory_parm.format = channel_info->format;
44 
45 	buffer.handle = rga_mm_import_buffer(&buffer, session);
46 	if (buffer.handle == 0) {
47 		pr_err("cannot import dma_buf %p\n", dma_buf);
48 		return -EFAULT;
49 	}
50 	channel_info->yrgb_addr = buffer.handle;
51 
52 	return 0;
53 }
54 
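/*
 * rga_mpi_set_channel_info() - apply per-frame MPI overrides to a channel.
 *
 * When the FIX_ENABLE flag for the given channel mask is set, the geometry
 * and format of the incoming video frame override the command about to be
 * submitted; when the CACHE_INFO flag is also set, the override is written
 * back into the cached config so it persists for subsequent commits.
 */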
55 static void rga_mpi_set_channel_info(uint32_t flags_mask, uint32_t flags,
56 				     struct rga_video_frame_info *mpi_frame,
57 				     struct rga_img_info_t *channel_info,
58 				     struct rga_img_info_t *cache_info)
59 {
60 	uint32_t fix_enable_flag, cache_info_flag;
61 
62 	switch (flags_mask) {
63 	case RGA_CONTEXT_SRC_MASK:
64 		fix_enable_flag = RGA_CONTEXT_SRC_FIX_ENABLE;
65 		cache_info_flag = RGA_CONTEXT_SRC_CACHE_INFO;
66 		break;
67 	case RGA_CONTEXT_PAT_MASK:
68 		fix_enable_flag = RGA_CONTEXT_PAT_FIX_ENABLE;
69 		cache_info_flag = RGA_CONTEXT_PAT_CACHE_INFO;
70 		break;
71 	case RGA_CONTEXT_DST_MASK:
72 		fix_enable_flag = RGA_CONTEXT_DST_FIX_ENABLE;
73 		cache_info_flag = RGA_CONTEXT_DST_CACHE_INFO;
74 		break;
75 	default:
76 		return;
77 	}
78 
79 	if (flags & fix_enable_flag) {
80 		channel_info->x_offset = mpi_frame->x_offset;
81 		channel_info->y_offset = mpi_frame->y_offset;
82 		channel_info->act_w = mpi_frame->width;
83 		channel_info->act_h = mpi_frame->height;
84 		channel_info->vir_w = mpi_frame->vir_w;
85 		channel_info->vir_h = mpi_frame->vir_h;
86 		channel_info->rd_mode = mpi_frame->rd_mode;
87 		channel_info->format = mpi_frame->format;
88 
89 		if (flags & cache_info_flag) {
90 			/* Replace the config of src in ctx with the config of mpi src. */
91 			cache_info->x_offset = mpi_frame->x_offset;
92 			cache_info->y_offset = mpi_frame->y_offset;
93 			cache_info->act_w = mpi_frame->width;
94 			cache_info->act_h = mpi_frame->height;
95 			cache_info->vir_w = mpi_frame->vir_w;
96 			cache_info->vir_h = mpi_frame->vir_h;
97 			cache_info->rd_mode = mpi_frame->rd_mode;
98 			cache_info->format = mpi_frame->format;
99 
100 		}
101 	}
102 }
103 
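/*
 * rga_mpi_commit() - run a cached RGA request on behalf of an MPI node.
 *
 * Looks up the request previously configured under mpi_job->ctx_id, patches
 * its src/pat/dst channels with the per-frame info and dma-bufs carried by
 * the MPI job, submits the resulting command synchronously, and copies the
 * destination geometry back into mpi_job->output for the next node in the
 * pipeline. Currently limited to single-task requests.
 */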
104 int rga_mpi_commit(struct rga_mpi_job_t *mpi_job)
105 {
106 	int ret = 0;
107 	struct rga_pending_request_manager *request_manager;
108 	struct rga_request *request;
109 	struct rga_req *cached_cmd;
110 	struct rga_req mpi_cmd;
111 	unsigned long flags;
112 
113 	request_manager = rga_drvdata->pend_request_manager;
114 
115 	mutex_lock(&request_manager->lock);
116 	request = rga_request_lookup(request_manager, mpi_job->ctx_id);
117 	if (IS_ERR_OR_NULL(request)) {
118 		pr_err("cannot find request from id[%d]\n", mpi_job->ctx_id);
119 		mutex_unlock(&request_manager->lock);
120 		return -EINVAL;
121 	}
122 
123 	if (request->task_count > 1) {
124 		/* TODO */
125 		pr_err("Currently request does not support multiple tasks!\n");
126 		mutex_unlock(&request_manager->lock);
127 		return -EINVAL;
128 	}
129 
130 	/*
131 	 * The mpi commit will use the request repeatedly, so an additional
132 	 * get() is added here.
133 	 */
134 	rga_request_get(request);
135 	mutex_unlock(&request_manager->lock);
136 
137 	spin_lock_irqsave(&request->lock, flags);
138 
139 	/* TODO: batch mode needs mpi async mode */
140 	request->sync_mode = RGA_BLIT_SYNC;
141 
142 	cached_cmd = request->task_list;
143 	memcpy(&mpi_cmd, cached_cmd, sizeof(mpi_cmd));
144 
145 	spin_unlock_irqrestore(&request->lock, flags);
146 
147 	/* set channel info */
148 	if ((mpi_job->src != NULL) && (request->flags & RGA_CONTEXT_SRC_MASK))
149 		rga_mpi_set_channel_info(RGA_CONTEXT_SRC_MASK,
150 					 request->flags,
151 					 mpi_job->src,
152 					 &mpi_cmd.src,
153 					 &cached_cmd->src);
154 
155 	if ((mpi_job->pat != NULL) && (request->flags & RGA_CONTEXT_PAT_MASK))
156 		rga_mpi_set_channel_info(RGA_CONTEXT_PAT_MASK,
157 					 request->flags,
158 					 mpi_job->pat,
159 					 &mpi_cmd.pat,
160 					 &cached_cmd->pat);
161 
162 	if ((mpi_job->dst != NULL) && (request->flags & RGA_CONTEXT_DST_MASK))
163 		rga_mpi_set_channel_info(RGA_CONTEXT_DST_MASK,
164 					 request->flags,
165 					 mpi_job->dst,
166 					 &mpi_cmd.dst,
167 					 &cached_cmd->dst);
168 
169 	/* set buffer handle */
170 	if (mpi_job->dma_buf_src0 != NULL) {
171 		ret = rga_mpi_set_channel_buffer(mpi_job->dma_buf_src0,
172 						 &mpi_cmd.src,
173 						 request->session);
174 		if (ret < 0) {
175 			pr_err("src channel set buffer handle failed!\n");
176 			goto err_put_request;
177 		}
178 	}
179 
180 	if (mpi_job->dma_buf_src1 != NULL) {
181 		ret = rga_mpi_set_channel_buffer(mpi_job->dma_buf_src1,
182 						 &mpi_cmd.pat,
183 						 request->session);
184 		if (ret < 0) {
185 			pr_err("src1 channel set buffer handle failed!\n");
186 			goto err_put_request;
187 		}
188 	}
189 
190 	if (mpi_job->dma_buf_dst != NULL) {
191 		ret = rga_mpi_set_channel_buffer(mpi_job->dma_buf_dst,
192 						 &mpi_cmd.dst,
193 						 request->session);
194 		if (ret < 0) {
195 			pr_err("dst channel set buffer handle failed!\n");
196 			goto err_put_request;
197 		}
198 	}
199 
200 	mpi_cmd.handle_flag = 1;
201 	mpi_cmd.mmu_info.mmu_en = 0;
202 	mpi_cmd.mmu_info.mmu_flag = 0;
203 
204 	if (DEBUGGER_EN(MSG))
205 		rga_cmd_print_debug_info(&mpi_cmd);
206 
207 	ret = rga_request_mpi_submit(&mpi_cmd, request);
208 	if (ret < 0) {
209 		if (ret == -ERESTARTSYS) {
210 			if (DEBUGGER_EN(MSG))
211 				pr_err("%s, commit mpi job failed: interrupted by a signal.\n",
212 					__func__);
213 		} else {
214 			pr_err("%s, commit mpi job failed\n", __func__);
215 		}
216 
217 		goto err_put_request;
218 	}
219 
220 	if ((mpi_job->dma_buf_src0 != NULL) && (mpi_cmd.src.yrgb_addr > 0))
221 		rga_mm_release_buffer(mpi_cmd.src.yrgb_addr);
222 	if ((mpi_job->dma_buf_src1 != NULL) && (mpi_cmd.pat.yrgb_addr > 0))
223 		rga_mm_release_buffer(mpi_cmd.pat.yrgb_addr);
224 	if ((mpi_job->dma_buf_dst != NULL) && (mpi_cmd.dst.yrgb_addr > 0))
225 		rga_mm_release_buffer(mpi_cmd.dst.yrgb_addr);
226 
227 	/* copy dst info to mpi job for next node */
228 	if (mpi_job->output != NULL) {
229 		mpi_job->output->x_offset = mpi_cmd.dst.x_offset;
230 		mpi_job->output->y_offset = mpi_cmd.dst.y_offset;
231 		mpi_job->output->width = mpi_cmd.dst.act_w;
232 		mpi_job->output->height = mpi_cmd.dst.act_h;
233 		mpi_job->output->vir_w = mpi_cmd.dst.vir_w;
234 		mpi_job->output->vir_h = mpi_cmd.dst.vir_h;
235 		mpi_job->output->rd_mode = mpi_cmd.dst.rd_mode;
236 		mpi_job->output->format = mpi_cmd.dst.format;
237 	}
238 
239 	return 0;
240 
241 err_put_request:
242 	mutex_lock(&request_manager->lock);
243 	rga_request_put(request);
244 	mutex_unlock(&request_manager->lock);
245 
246 	return ret;
247 }
248 EXPORT_SYMBOL_GPL(rga_mpi_commit);
249 
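/*
 * rga_kernel_commit() - submit a single rga_req from another kernel driver.
 *
 * Creates a private session and a one-task synchronous request around the
 * caller's fully populated struct rga_req, submits it, and tears both down
 * before returning.
 *
 * Minimal usage sketch for an in-kernel caller (hypothetical; the handle
 * values and the fields shown are illustrative only):
 *
 *	struct rga_req req;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.src.yrgb_addr = src_handle;	// handle from rga_mm_import_buffer()
 *	req.dst.yrgb_addr = dst_handle;
 *	req.handle_flag = 1;		// addresses above are mm handles
 *	// ...act_w/act_h, vir_w/vir_h, format, rd_mode, etc...
 *	ret = rga_kernel_commit(&req);
 */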
250 int rga_kernel_commit(struct rga_req *cmd)
251 {
252 	int ret = 0;
253 	int request_id;
254 	struct rga_user_request kernel_request;
255 	struct rga_request *request = NULL;
256 	struct rga_session *session = NULL;
257 	struct rga_pending_request_manager *request_manager = rga_drvdata->pend_request_manager;
258 
259 	session = rga_session_init();
260 	if (IS_ERR(session))
261 		return PTR_ERR(session);
262 
263 	request_id = rga_request_alloc(0, session);
264 	if (request_id < 0) {
265 		pr_err("request alloc error!\n");
266 		ret = request_id;
267 		return ret;
268 	}
269 
270 	memset(&kernel_request, 0, sizeof(kernel_request));
271 	kernel_request.id = request_id;
272 	kernel_request.task_ptr = (uint64_t)(unsigned long)cmd;
273 	kernel_request.task_num = 1;
274 	kernel_request.sync_mode = RGA_BLIT_SYNC;
275 
276 	ret = rga_request_check(&kernel_request);
277 	if (ret < 0) {
278 		pr_err("user request check error!\n");
279 		goto err_free_request_by_id;
280 	}
281 
282 	request = rga_request_kernel_config(&kernel_request);
283 	if (IS_ERR(request)) {
284 		pr_err("request[%d] config failed!\n", kernel_request.id);
285 		ret = -EFAULT;
286 		goto err_free_request_by_id;
287 	}
288 
289 	if (DEBUGGER_EN(MSG)) {
290 		pr_info("kernel blit mode: request id = %d\n", kernel_request.id);
291 		rga_cmd_print_debug_info(cmd);
292 	}
293 
294 	ret = rga_request_submit(request);
295 	if (ret < 0) {
296 		pr_err("request[%d] submit failed!\n", kernel_request.id);
297 		goto err_put_request;
298 	}
299 
300 err_put_request:
301 	mutex_lock(&request_manager->lock);
302 	rga_request_put(request);
303 	mutex_unlock(&request_manager->lock);
304 
305 	rga_session_deinit(session);
306 
307 	return ret;
308 
309 err_free_request_by_id:
310 	mutex_lock(&request_manager->lock);
311 
312 	request = rga_request_lookup(request_manager, request_id);
313 	if (IS_ERR_OR_NULL(request)) {
314 		pr_err("cannot find request from id[%d]\n", request_id);
315 		mutex_unlock(&request_manager->lock);
316 		return -EINVAL;
317 	}
318 
319 	rga_request_free(request);
320 
321 	mutex_unlock(&request_manager->lock);
322 
323 	return ret;
324 }
325 EXPORT_SYMBOL_GPL(rga_kernel_commit);
326 
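/*
 * hrtimer_handler() - periodic load-statistics tick.
 *
 * Every RGA_TIMER_INTERVAL_NS the accumulated busy time of each scheduler
 * is snapshotted into timer.busy_time_record and then reset; if a job is
 * running when the tick fires, its elapsed hardware time is accounted
 * first. The record is what the driver's load/debug interfaces can report
 * as per-interval utilization.
 */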
327 static enum hrtimer_restart hrtimer_handler(struct hrtimer *timer)
328 {
329 	struct rga_drvdata_t *rga = rga_drvdata;
330 	struct rga_scheduler_t *scheduler = NULL;
331 	struct rga_job *job = NULL;
332 	unsigned long flags;
333 	int i;
334 
335 	ktime_t now = ktime_get();
336 
337 	for (i = 0; i < rga->num_of_scheduler; i++) {
338 		scheduler = rga->scheduler[i];
339 
340 		spin_lock_irqsave(&scheduler->irq_lock, flags);
341 
342 		/* account elapsed hardware time for the currently running job */
343 		job = scheduler->running_job;
344 		if (job) {
345 			scheduler->timer.busy_time += ktime_us_delta(now, job->hw_recoder_time);
346 			job->hw_recoder_time = now;
347 		}
348 
349 		scheduler->timer.busy_time_record = scheduler->timer.busy_time;
350 		scheduler->timer.busy_time = 0;
351 
352 		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
353 	}
354 
355 	hrtimer_forward_now(timer, kt);
356 	return HRTIMER_RESTART;
357 }
358 
359 static void rga_init_timer(void)
360 {
361 	kt = ktime_set(0, RGA_TIMER_INTERVAL_NS);
362 	hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
363 
364 	timer.function = hrtimer_handler;
365 
366 	hrtimer_start(&timer, kt, HRTIMER_MODE_REL);
367 }
368 
369 static void rga_cancel_timer(void)
370 {
371 	hrtimer_cancel(&timer);
372 }
373 
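/*
 * rga_power_enable()/rga_power_disable() must be called in balanced pairs:
 * enable takes runtime-PM and wakeup references, ungates the core's clocks
 * and bumps pd_refcount; disable reverses those steps and marks the
 * scheduler idle once the refcount reaches zero. With RGA_DISABLE_PM
 * defined, both collapse to no-ops.
 */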
374 #ifndef RGA_DISABLE_PM
375 int rga_power_enable(struct rga_scheduler_t *scheduler)
376 {
377 	int ret = -EINVAL;
378 	int i;
379 	unsigned long flags;
380 
381 	pm_runtime_get_sync(scheduler->dev);
382 	pm_stay_awake(scheduler->dev);
383 
384 	for (i = 0; i < scheduler->num_clks; i++) {
385 		if (!IS_ERR(scheduler->clks[i])) {
386 			ret = clk_prepare_enable(scheduler->clks[i]);
387 			if (ret < 0)
388 				goto err_enable_clk;
389 		}
390 	}
391 
392 	spin_lock_irqsave(&scheduler->irq_lock, flags);
393 
394 	scheduler->pd_refcount++;
395 	if (scheduler->status == RGA_SCHEDULER_IDLE)
396 		scheduler->status = RGA_SCHEDULER_WORKING;
397 
398 	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
399 
400 	return 0;
401 
402 err_enable_clk:
403 	for (--i; i >= 0; --i)
404 		if (!IS_ERR(scheduler->clks[i]))
405 			clk_disable_unprepare(scheduler->clks[i]);
406 
407 	pm_relax(scheduler->dev);
408 	pm_runtime_put_sync_suspend(scheduler->dev);
409 
410 	return ret;
411 }
412 
413 int rga_power_disable(struct rga_scheduler_t *scheduler)
414 {
415 	int i;
416 	unsigned long flags;
417 
418 	spin_lock_irqsave(&scheduler->irq_lock, flags);
419 
420 	if (scheduler->status == RGA_SCHEDULER_IDLE ||
421 	    scheduler->pd_refcount == 0) {
422 		spin_unlock_irqrestore(&scheduler->irq_lock, flags);
423 		WARN(true, "%s already idle!\n", dev_driver_string(scheduler->dev));
424 		return -1;
425 	}
426 
427 	scheduler->pd_refcount--;
428 	if (scheduler->pd_refcount == 0)
429 		scheduler->status = RGA_SCHEDULER_IDLE;
430 
431 	spin_unlock_irqrestore(&scheduler->irq_lock, flags);
432 
433 	for (i = scheduler->num_clks - 1; i >= 0; i--)
434 		if (!IS_ERR(scheduler->clks[i]))
435 			clk_disable_unprepare(scheduler->clks[i]);
436 
437 	pm_relax(scheduler->dev);
438 	pm_runtime_put_sync_suspend(scheduler->dev);
439 
440 	return 0;
441 }
442 
443 static void rga_power_enable_all(void)
444 {
445 	struct rga_scheduler_t *scheduler = NULL;
446 	int ret = 0;
447 	int i;
448 
449 	for (i = 0; i < rga_drvdata->num_of_scheduler; i++) {
450 		scheduler = rga_drvdata->scheduler[i];
451 		ret = rga_power_enable(scheduler);
452 		if (ret < 0)
453 			pr_err("power enable failed\n");
454 	}
455 }
456 
457 static void rga_power_disable_all(void)
458 {
459 	struct rga_scheduler_t *scheduler = NULL;
460 	int i;
461 
462 	for (i = 0; i < rga_drvdata->num_of_scheduler; i++) {
463 		scheduler = rga_drvdata->scheduler[i];
464 		rga_power_disable(scheduler);
465 	}
466 }
467 
468 #else
469 int rga_power_enable(struct rga_scheduler_t *scheduler)
470 {
471 	return 0;
472 }
473 
474 int rga_power_disable(struct rga_scheduler_t *scheduler)
475 {
476 	return 0;
477 }
478 
479 static inline void rga_power_enable_all(void) {}
480 static inline void rga_power_disable_all(void) {}
481 #endif /* #ifndef RGA_DISABLE_PM */
482 
483 static int rga_session_manager_init(struct rga_session_manager **session_manager_ptr)
484 {
485 	struct rga_session_manager *session_manager = NULL;
486 
487 	*session_manager_ptr = kzalloc(sizeof(struct rga_session_manager), GFP_KERNEL);
488 	if (*session_manager_ptr == NULL) {
489 		pr_err("cannot kzalloc for rga_session_manager\n");
490 		return -ENOMEM;
491 	}
492 
493 	session_manager = *session_manager_ptr;
494 
495 	mutex_init(&session_manager->lock);
496 
497 	idr_init_base(&session_manager->ctx_id_idr, 1);
498 
499 	return 0;
500 }
501 
502 /*
503  * Called at driver close to release the rga session's id references.
504  */
505 static int rga_session_free_remove_idr_cb(int id, void *ptr, void *data)
506 {
507 	struct rga_session *session = ptr;
508 
509 	idr_remove(&rga_drvdata->session_manager->ctx_id_idr, session->id);
510 	kfree(session);
511 
512 	return 0;
513 }
514 
515 static int rga_session_free_remove_idr(struct rga_session *session)
516 {
517 	struct rga_session_manager *session_manager;
518 
519 	session_manager = rga_drvdata->session_manager;
520 
521 	mutex_lock(&session_manager->lock);
522 
523 	session_manager->session_cnt--;
524 	idr_remove(&session_manager->ctx_id_idr, session->id);
525 
526 	mutex_unlock(&session_manager->lock);
527 
528 	return 0;
529 }
530 
531 static int rga_session_manager_remove(struct rga_session_manager **session_manager_ptr)
532 {
533 	struct rga_session_manager *session_manager = *session_manager_ptr;
534 
535 	mutex_lock(&session_manager->lock);
536 
537 	idr_for_each(&session_manager->ctx_id_idr, &rga_session_free_remove_idr_cb, session_manager);
538 	idr_destroy(&session_manager->ctx_id_idr);
539 
540 	mutex_unlock(&session_manager->lock);
541 
542 	kfree(*session_manager_ptr);
543 
544 	*session_manager_ptr = NULL;
545 
546 	return 0;
547 }
548 
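/*
 * rga_session_init() - allocate a session and register it with the session
 * manager. An ID is taken cyclically from the manager's IDR (starting at 1),
 * and the owner's thread-group id and command line are recorded for later
 * identification.
 */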
549 static struct rga_session *rga_session_init(void)
550 {
551 	int new_id;
552 
553 	struct rga_session_manager *session_manager = NULL;
554 	struct rga_session *session = NULL;
555 
556 	session_manager = rga_drvdata->session_manager;
557 	if (session_manager == NULL) {
558 		pr_err("rga_session_manager is null!\n");
559 		return ERR_PTR(-EFAULT);
560 	}
561 
562 	session = kzalloc(sizeof(*session), GFP_KERNEL);
563 	if (!session) {
564 		pr_err("rga_session alloc failed\n");
565 		return ERR_PTR(-ENOMEM);
566 	}
567 
568 	mutex_lock(&session_manager->lock);
569 
570 	idr_preload(GFP_KERNEL);
571 	new_id = idr_alloc_cyclic(&session_manager->ctx_id_idr, session, 1, 0, GFP_NOWAIT);
572 	idr_preload_end();
573 	if (new_id < 0) {
574 		mutex_unlock(&session_manager->lock);
575 
576 		pr_err("rga_session alloc id failed!\n");
577 		kfree(session);
578 		return ERR_PTR(new_id);
579 	}
580 
581 	session->id = new_id;
582 	session_manager->session_cnt++;
583 
584 	mutex_unlock(&session_manager->lock);
585 
586 	session->tgid = current->tgid;
587 	session->pname = kstrdup_quotable_cmdline(current, GFP_KERNEL);
588 
589 	return session;
590 }
591 
592 static int rga_session_deinit(struct rga_session *session)
593 {
594 	rga_request_session_destroy_abort(session);
595 	rga_mm_session_release_buffer(session);
596 
597 	rga_session_free_remove_idr(session);
598 
599 	kfree(session->pname);
600 	kfree(session);
601 
602 	return 0;
603 }
604 
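/*
 * rga_ioctl_import_buffer() - RGA_IOC_IMPORT_BUFFER handler.
 *
 * Copies a struct rga_buffer_pool header from userspace, bounds-checks the
 * buffer count against RGA_BUFFER_POOL_SIZE_MAX, copies in the array of
 * struct rga_external_buffer, imports each entry into the memory manager,
 * and copies the array back out with the allocated handles filled in.
 */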
605 static long rga_ioctl_import_buffer(unsigned long arg, struct rga_session *session)
606 {
607 	int i;
608 	int ret = 0;
609 	struct rga_buffer_pool buffer_pool;
610 	struct rga_external_buffer *external_buffer = NULL;
611 
612 	if (unlikely(copy_from_user(&buffer_pool,
613 				    (struct rga_buffer_pool *)arg,
614 				    sizeof(buffer_pool)))) {
615 		pr_err("rga_buffer_pool copy_from_user failed!\n");
616 		return -EFAULT;
617 	}
618 
619 	if (buffer_pool.size > RGA_BUFFER_POOL_SIZE_MAX) {
620 		pr_err("Cannot import more than %d buffers at a time!\n",
621 		       RGA_BUFFER_POOL_SIZE_MAX);
622 		return -EFBIG;
623 	}
624 
625 	if (buffer_pool.buffers_ptr == 0) {
626 		pr_err("Import buffers is NULL!\n");
627 		return -EFAULT;
628 	}
629 
630 	external_buffer = kmalloc(sizeof(struct rga_external_buffer) * buffer_pool.size,
631 				  GFP_KERNEL);
632 	if (external_buffer == NULL) {
633 		pr_err("external buffer list alloc error!\n");
634 		return -ENOMEM;
635 	}
636 
637 	if (unlikely(copy_from_user(external_buffer,
638 				    u64_to_user_ptr(buffer_pool.buffers_ptr),
639 				    sizeof(struct rga_external_buffer) * buffer_pool.size))) {
640 		pr_err("rga_buffer_pool external_buffer list copy_from_user failed\n");
641 		ret = -EFAULT;
642 
643 		goto err_free_external_buffer;
644 	}
645 
646 	for (i = 0; i < buffer_pool.size; i++) {
647 		if (DEBUGGER_EN(MSG)) {
648 			pr_info("import buffer info:\n");
649 			rga_dump_external_buffer(&external_buffer[i]);
650 		}
651 
652 		ret = rga_mm_import_buffer(&external_buffer[i], session);
653 		if (ret == 0) {
654 			pr_err("buffer[%d] mm import buffer failed! memory = 0x%lx, type = %s(0x%x)\n",
655 			       i, (unsigned long)external_buffer[i].memory,
656 			       rga_get_memory_type_str(external_buffer[i].type),
657 			       external_buffer[i].type);
658 			ret = -EFAULT; /* rga_mm_import_buffer() returns 0 on failure */
659 			goto err_free_external_buffer;
660 		}
661 
662 		external_buffer[i].handle = ret;
663 	}
664 
665 	if (unlikely(copy_to_user(u64_to_user_ptr(buffer_pool.buffers_ptr),
666 				  external_buffer,
667 				  sizeof(struct rga_external_buffer) * buffer_pool.size))) {
668 		pr_err("rga_buffer_pool external_buffer list copy_to_user failed\n");
669 		ret = -EFAULT;
670 
671 		goto err_free_external_buffer;
672 	}
673 
674 err_free_external_buffer:
675 	kfree(external_buffer);
676 	return ret;
677 }
678 
679 static long rga_ioctl_release_buffer(unsigned long arg)
680 {
681 	int i;
682 	int ret = 0;
683 	struct rga_buffer_pool buffer_pool;
684 	struct rga_external_buffer *external_buffer = NULL;
685 
686 	if (unlikely(copy_from_user(&buffer_pool,
687 				    (struct rga_buffer_pool *)arg,
688 				    sizeof(buffer_pool)))) {
689 		pr_err("rga_buffer_pool copy_from_user failed!\n");
690 		return -EFAULT;
691 	}
692 
693 	if (buffer_pool.size > RGA_BUFFER_POOL_SIZE_MAX) {
694 		pr_err("Cannot release more than %d buffers at a time!\n",
695 		       RGA_BUFFER_POOL_SIZE_MAX);
696 		return -EFBIG;
697 	}
698 
699 	if (buffer_pool.buffers_ptr == 0) {
700 		pr_err("Release buffers is NULL!\n");
701 		return -EFAULT;
702 	}
703 
704 	external_buffer = kmalloc(sizeof(struct rga_external_buffer) * buffer_pool.size,
705 				  GFP_KERNEL);
706 	if (external_buffer == NULL) {
707 		pr_err("external buffer list alloc error!\n");
708 		return -ENOMEM;
709 	}
710 
711 	if (unlikely(copy_from_user(external_buffer,
712 				    u64_to_user_ptr(buffer_pool.buffers_ptr),
713 				    sizeof(struct rga_external_buffer) * buffer_pool.size))) {
714 		pr_err("rga_buffer_pool external_buffer list copy_from_user failed\n");
715 		ret = -EFAULT;
716 
717 		goto err_free_external_buffer;
718 	}
719 
720 	for (i = 0; i < buffer_pool.size; i++) {
721 		if (DEBUGGER_EN(MSG))
722 			pr_info("release buffer handle[%d]\n", external_buffer[i].handle);
723 
724 		ret = rga_mm_release_buffer(external_buffer[i].handle);
725 		if (ret < 0) {
726 			pr_err("buffer[%d] mm release buffer failed! handle = %d\n",
727 			       i, external_buffer[i].handle);
728 
729 			goto err_free_external_buffer;
730 		}
731 	}
732 
733 err_free_external_buffer:
734 	kfree(external_buffer);
735 	return ret;
736 }
737 
738 static long rga_ioctl_request_create(unsigned long arg, struct rga_session *session)
739 {
740 	uint32_t id;
741 	uint32_t flags;
742 
743 	if (copy_from_user(&flags, (void *)arg, sizeof(uint32_t))) {
744 		pr_err("%s failed to copy from user!\n", __func__);
745 		return -EFAULT;
746 	}
747 
748 	id = rga_request_alloc(flags, session);
749 
750 	if (copy_to_user((void *)arg, &id, sizeof(uint32_t))) {
751 		pr_err("%s failed to copy to user!\n", __func__);
752 		return -EFAULT;
753 	}
754 
755 	return 0;
756 }
757 
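/*
 * rga_ioctl_request_submit() - back-end for RGA_IOC_REQUEST_SUBMIT and
 * RGA_IOC_REQUEST_CONFIG. Both commands validate and configure the user
 * request; run_enable selects whether it is also submitted to the hardware
 * now (submit) or only configured (e.g. for a later rga_mpi_commit()). For
 * asynchronous submits the release fence fd is copied back to userspace.
 */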
758 static long rga_ioctl_request_submit(unsigned long arg, bool run_enable)
759 {
760 	int ret = 0;
761 	struct rga_pending_request_manager *request_manager = NULL;
762 	struct rga_user_request user_request;
763 	struct rga_request *request = NULL;
764 
765 	request_manager = rga_drvdata->pend_request_manager;
766 
767 	if (unlikely(copy_from_user(&user_request,
768 				    (struct rga_user_request *)arg,
769 				    sizeof(user_request)))) {
770 		pr_err("%s copy_from_user failed!\n", __func__);
771 		return -EFAULT;
772 	}
773 
774 	ret = rga_request_check(&user_request);
775 	if (ret < 0) {
776 		pr_err("user request check error!\n");
777 		return ret;
778 	}
779 
780 	if (DEBUGGER_EN(MSG))
781 		pr_info("config request id = %d\n", user_request.id);
782 
783 	request = rga_request_config(&user_request);
784 	if (IS_ERR_OR_NULL(request)) {
785 		pr_err("request[%d] config failed!\n", user_request.id);
786 		return -EFAULT;
787 	}
788 
789 	if (run_enable) {
790 		ret = rga_request_submit(request);
791 		if (ret < 0) {
792 			pr_err("request[%d] submit failed!\n", user_request.id);
793 			return -EFAULT;
794 		}
795 
796 		if (request->sync_mode == RGA_BLIT_ASYNC) {
797 			user_request.release_fence_fd = request->release_fence_fd;
798 			if (copy_to_user((struct rga_user_request *)arg,
799 					 &user_request, sizeof(user_request))) {
800 				pr_err("copy_to_user failed\n");
801 				return -EFAULT;
802 			}
803 		}
804 	}
805 
806 	mutex_lock(&request_manager->lock);
807 	rga_request_put(request);
808 	mutex_unlock(&request_manager->lock);
809 
810 	return 0;
811 }
812 
813 static long rga_ioctl_request_cancel(unsigned long arg)
814 {
815 	uint32_t id;
816 	struct rga_pending_request_manager *request_manager;
817 	struct rga_request *request;
818 
819 	request_manager = rga_drvdata->pend_request_manager;
820 	if (request_manager == NULL) {
821 		pr_err("rga_pending_request_manager is null!\n");
822 		return -EFAULT;
823 	}
824 
825 	if (unlikely(copy_from_user(&id, (uint32_t *)arg, sizeof(uint32_t)))) {
826 		pr_err("request id copy_from_user failed!\n");
827 		return -EFAULT;
828 	}
829 
830 	if (DEBUGGER_EN(MSG))
831 		pr_info("config cancel request id = %d\n", id);
832 
833 	mutex_lock(&request_manager->lock);
834 
835 	request = rga_request_lookup(request_manager, id);
836 	if (IS_ERR_OR_NULL(request)) {
837 		pr_err("cannot find request from id[%d]\n", id);
838 		mutex_unlock(&request_manager->lock);
839 		return -EINVAL;
840 	}
841 
842 	rga_request_put(request);
843 
844 	mutex_unlock(&request_manager->lock);
845 
846 	return 0;
847 }
848 
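/*
 * rga_ioctl_blit() - legacy RGA_BLIT_SYNC/RGA_BLIT_ASYNC path.
 *
 * Wraps the single struct rga_req at @arg in a freshly allocated one-task
 * request, adopts the caller's in_fence_fd as the acquire fence, submits
 * it, and for asynchronous commits returns the release fence through
 * rga_req.out_fence_fd.
 */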
849 static long rga_ioctl_blit(unsigned long arg, uint32_t cmd, struct rga_session *session)
850 {
851 	int ret = 0;
852 	int request_id;
853 	struct rga_user_request user_request;
854 	struct rga_req *rga_req;
855 	struct rga_request *request = NULL;
856 	struct rga_pending_request_manager *request_manager = rga_drvdata->pend_request_manager;
857 
858 	request_id = rga_request_alloc(0, session);
859 	if (request_id < 0) {
860 		pr_err("request alloc error!\n");
861 		ret = request_id;
862 		return ret;
863 	}
864 
865 	memset(&user_request, 0, sizeof(user_request));
866 	user_request.id = request_id;
867 	user_request.task_ptr = arg;
868 	user_request.task_num = 1;
869 	user_request.sync_mode = cmd;
870 
871 	ret = rga_request_check(&user_request);
872 	if (ret < 0) {
873 		pr_err("user request check error!\n");
874 		goto err_free_request_by_id;
875 	}
876 
877 	request = rga_request_config(&user_request);
878 	if (IS_ERR(request)) {
879 		pr_err("request[%d] config failed!\n", user_request.id);
880 		ret = -EFAULT;
881 		goto err_free_request_by_id;
882 	}
883 
884 	rga_req = request->task_list;
885 	/* In the BLIT_SYNC/BLIT_ASYNC command, in_fence_fd needs to be set. */
886 	request->acquire_fence_fd = rga_req->in_fence_fd;
887 
888 	if (DEBUGGER_EN(MSG)) {
889 		pr_info("Blit mode: request id = %d\n", user_request.id);
890 		rga_cmd_print_debug_info(rga_req);
891 	}
892 
893 	ret = rga_request_submit(request);
894 	if (ret < 0) {
895 		pr_err("request[%d] submit failed!\n", user_request.id);
896 		goto err_put_request;
897 	}
898 
899 	if (request->sync_mode == RGA_BLIT_ASYNC) {
900 		rga_req->out_fence_fd = request->release_fence_fd;
901 		if (copy_to_user((struct rga_req *)arg, rga_req, sizeof(struct rga_req))) {
902 			pr_err("copy_to_user failed\n");
903 			ret = -EFAULT;
904 			goto err_put_request;
905 		}
906 	}
907 
908 err_put_request:
909 	mutex_lock(&request_manager->lock);
910 	rga_request_put(request);
911 	mutex_unlock(&request_manager->lock);
912 
913 	return ret;
914 
915 err_free_request_by_id:
916 	mutex_lock(&request_manager->lock);
917 
918 	request = rga_request_lookup(request_manager, request_id);
919 	if (IS_ERR_OR_NULL(request)) {
920 		pr_err("cannot find request from id[%d]\n", request_id);
921 		mutex_unlock(&request_manager->lock);
922 		return -EINVAL;
923 	}
924 
925 	rga_request_free(request);
926 
927 	mutex_unlock(&request_manager->lock);
928 
929 	return ret;
930 }
931 
932 static long rga_ioctl(struct file *file, uint32_t cmd, unsigned long arg)
933 {
934 	int ret = 0;
935 	int i = 0;
936 	int major_version = 0, minor_version = 0;
937 	char version[16] = { 0 };
938 	struct rga_version_t driver_version;
939 	struct rga_hw_versions_t hw_versions;
940 	struct rga_drvdata_t *rga = rga_drvdata;
941 	struct rga_session *session = file->private_data;
942 
943 	if (!rga) {
944 		pr_err("rga_drvdata is null, rga is not initialized\n");
945 		return -ENODEV;
946 	}
947 
948 	if (DEBUGGER_EN(NONUSE))
949 		return 0;
950 
951 	switch (cmd) {
952 	case RGA_BLIT_SYNC:
953 	case RGA_BLIT_ASYNC:
954 		ret = rga_ioctl_blit(arg, cmd, session);
955 
956 		break;
957 	case RGA_CACHE_FLUSH:
958 	case RGA_FLUSH:
959 	case RGA_GET_RESULT:
960 		break;
961 	case RGA_GET_VERSION:
962 		sscanf(rga->scheduler[i]->version.str, "%x.%x.%*x",
963 			 &major_version, &minor_version);
964 		snprintf(version, 5, "%x.%02x", major_version, minor_version);
965 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
966 		/* TODO: userspace to get version */
967 		if (copy_to_user((void *)arg, version, sizeof(version)))
968 			ret = -EFAULT;
969 #else
970 		if (copy_to_user((void *)arg, RGA3_VERSION,
971 				 sizeof(RGA3_VERSION)))
972 			ret = -EFAULT;
973 #endif
974 		break;
975 	case RGA2_GET_VERSION:
976 		for (i = 0; i < rga->num_of_scheduler; i++) {
977 			if (rga->scheduler[i]->ops == &rga2_ops) {
978 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
979 				if (copy_to_user((void *)arg, rga->scheduler[i]->version.str,
980 					sizeof(rga->scheduler[i]->version.str)))
981 					ret = -EFAULT;
982 #else
983 				if (copy_to_user((void *)arg, RGA3_VERSION,
984 						sizeof(RGA3_VERSION)))
985 					ret = -EFAULT;
986 #endif
987 				else
988 					ret = true;
989 
990 				break;
991 			}
992 		}
993 
994 		/* This will indicate that the RGA2 version number cannot be obtained. */
995 		if (ret != true)
996 			ret = -EFAULT;
997 
998 		break;
999 
1000 	case RGA_IOC_GET_HW_VERSION:
1001 		/* RGA hardware version */
1002 		hw_versions.size = rga->num_of_scheduler > RGA_HW_SIZE ?
1003 			RGA_HW_SIZE : rga->num_of_scheduler;
1004 
1005 		for (i = 0; i < hw_versions.size; i++) {
1006 			memcpy(&hw_versions.version[i], &rga->scheduler[i]->version,
1007 				sizeof(rga->scheduler[i]->version));
1008 		}
1009 
1010 		if (copy_to_user((void *)arg, &hw_versions, sizeof(hw_versions)))
1011 			ret = -EFAULT;
1012 		else
1013 			ret = true;
1014 
1015 		break;
1016 
1017 	case RGA_IOC_GET_DRVIER_VERSION:
1018 		/* Driver version */
1019 		driver_version.major = DRIVER_MAJOR_VERISON;
1020 		driver_version.minor = DRIVER_MINOR_VERSION;
1021 		driver_version.revision = DRIVER_REVISION_VERSION;
1022 		strncpy((char *)driver_version.str, DRIVER_VERSION, sizeof(driver_version.str));
1023 
1024 		if (copy_to_user((void *)arg, &driver_version, sizeof(driver_version)))
1025 			ret = -EFAULT;
1026 		else
1027 			ret = true;
1028 
1029 		break;
1030 
1031 	case RGA_IOC_IMPORT_BUFFER:
1032 		rga_power_enable_all();
1033 
1034 		ret = rga_ioctl_import_buffer(arg, session);
1035 
1036 		rga_power_disable_all();
1037 
1038 		break;
1039 
1040 	case RGA_IOC_RELEASE_BUFFER:
1041 		rga_power_enable_all();
1042 
1043 		ret = rga_ioctl_release_buffer(arg);
1044 
1045 		rga_power_disable_all();
1046 
1047 		break;
1048 
1049 	case RGA_IOC_REQUEST_CREATE:
1050 		ret = rga_ioctl_request_create(arg, session);
1051 
1052 		break;
1053 
1054 	case RGA_IOC_REQUEST_SUBMIT:
1055 		ret = rga_ioctl_request_submit(arg, true);
1056 
1057 		break;
1058 
1059 	case RGA_IOC_REQUEST_CONFIG:
1060 		ret = rga_ioctl_request_submit(arg, false);
1061 
1062 		break;
1063 
1064 	case RGA_IOC_REQUEST_CANCEL:
1065 		ret = rga_ioctl_request_cancel(arg);
1066 
1067 		break;
1068 
1069 	case RGA_IMPORT_DMA:
1070 	case RGA_RELEASE_DMA:
1071 	default:
1072 		pr_err("unknown ioctl cmd!\n");
1073 		ret = -EINVAL;
1074 		break;
1075 	}
1076 
1077 	return ret;
1078 }
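/*
 * Typical userspace sequence against /dev/rga (a hypothetical sketch; real
 * clients normally go through librga rather than raw ioctls):
 *
 *	int fd = open("/dev/rga", O_RDWR);
 *	uint32_t id = 0;			// in: alloc flags, out: request id
 *	struct rga_user_request ureq = {0};
 *
 *	ioctl(fd, RGA_IOC_REQUEST_CREATE, &id);
 *	ureq.id = id;
 *	ureq.task_ptr = (uint64_t)(uintptr_t)tasks;	// array of struct rga_req
 *	ureq.task_num = task_count;
 *	ureq.sync_mode = RGA_BLIT_SYNC;
 *	ioctl(fd, RGA_IOC_REQUEST_SUBMIT, &ureq);
 *	ioctl(fd, RGA_IOC_REQUEST_CANCEL, &id);
 *	close(fd);
 */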
1079 
1080 #ifdef CONFIG_ROCKCHIP_RGA_DEBUGGER
1081 static int rga_debugger_init(struct rga_debugger **debugger_p)
1082 {
1083 	struct rga_debugger *debugger;
1084 
1085 	*debugger_p = kzalloc(sizeof(struct rga_debugger), GFP_KERNEL);
1086 	if (*debugger_p == NULL) {
1087 		pr_err("cannot alloc for rga debugger\n");
1088 		return -ENOMEM;
1089 	}
1090 
1091 	debugger = *debugger_p;
1092 
1093 #ifdef CONFIG_ROCKCHIP_RGA_DEBUG_FS
1094 	mutex_init(&debugger->debugfs_lock);
1095 	INIT_LIST_HEAD(&debugger->debugfs_entry_list);
1096 #endif
1097 
1098 #ifdef CONFIG_ROCKCHIP_RGA_PROC_FS
1099 	mutex_init(&debugger->procfs_lock);
1100 	INIT_LIST_HEAD(&debugger->procfs_entry_list);
1101 #endif
1102 
1103 	rga_debugfs_init();
1104 	rga_procfs_init();
1105 
1106 	return 0;
1107 }
1108 
1109 static int rga_debugger_remove(struct rga_debugger **debugger_p)
1110 {
1111 	rga_debugfs_remove();
1112 	rga_procfs_remove();
1113 
1114 	kfree(*debugger_p);
1115 	*debugger_p = NULL;
1116 
1117 	return 0;
1118 }
1119 #endif
1120 
1121 static int rga_open(struct inode *inode, struct file *file)
1122 {
1123 	struct rga_session *session = NULL;
1124 
1125 	session = rga_session_init();
1126 	if (IS_ERR(session))
1127 		return PTR_ERR(session);
1128 
1129 	file->private_data = (void *)session;
1130 
1131 	return nonseekable_open(inode, file);
1132 }
1133 
1134 static int rga_release(struct inode *inode, struct file *file)
1135 {
1136 	struct rga_session *session = file->private_data;
1137 
1138 	rga_session_deinit(session);
1139 
1140 	return 0;
1141 }
1142 
1143 static irqreturn_t rga_irq_handler(int irq, void *data)
1144 {
1145 	irqreturn_t irq_ret = IRQ_NONE;
1146 	struct rga_scheduler_t *scheduler = data;
1147 
1148 	if (scheduler->ops->irq)
1149 		irq_ret = scheduler->ops->irq(scheduler);
1150 
1151 	return irq_ret;
1152 }
1153 
1154 static irqreturn_t rga_isr_thread(int irq, void *data)
1155 {
1156 	irqreturn_t irq_ret = IRQ_NONE;
1157 	struct rga_scheduler_t *scheduler = data;
1158 	struct rga_job *job;
1159 
1160 	job = rga_job_done(scheduler);
1161 	if (job == NULL) {
1162 		pr_err("isr thread: invalid job!\n");
1163 		return IRQ_HANDLED;
1164 	}
1165 
1166 	if (scheduler->ops->isr_thread)
1167 		irq_ret = scheduler->ops->isr_thread(job, scheduler);
1168 
1169 	rga_request_release_signal(scheduler, job);
1170 
1171 	rga_job_next(scheduler);
1172 
1173 	rga_power_disable(scheduler);
1174 
1175 	return irq_ret;
1176 }
1177 
1178 const struct file_operations rga_fops = {
1179 	.owner = THIS_MODULE,
1180 	.open = rga_open,
1181 	.release = rga_release,
1182 	.unlocked_ioctl = rga_ioctl,
1183 #ifdef CONFIG_COMPAT
1184 	.compat_ioctl = rga_ioctl,
1185 #endif
1186 };
1187 
1188 static struct miscdevice rga_dev = {
1189 	.minor = MISC_DYNAMIC_MINOR,
1190 	.name = "rga",
1191 	.fops = &rga_fops,
1192 };
1193 
1194 static const char *const old_rga2_clks[] = {
1195 	"aclk_rga",
1196 	"hclk_rga",
1197 	"clk_rga",
1198 };
1199 
1200 static const char *const rk3588_rga2_clks[] = {
1201 	"aclk_rga2",
1202 	"hclk_rga2",
1203 	"clk_rga2",
1204 };
1205 
1206 static const char *const rga3_core_0_clks[] = {
1207 	"aclk_rga3_0",
1208 	"hclk_rga3_0",
1209 	"clk_rga3_0",
1210 };
1211 
1212 static const char *const rga3_core_1_clks[] = {
1213 	"aclk_rga3_1",
1214 	"hclk_rga3_1",
1215 	"clk_rga3_1",
1216 };
1217 
1218 static const struct rga_match_data_t old_rga2_match_data = {
1219 	.clks = old_rga2_clks,
1220 	.num_clks = ARRAY_SIZE(old_rga2_clks),
1221 };
1222 
1223 static const struct rga_match_data_t rk3588_rga2_match_data = {
1224 	.clks = rk3588_rga2_clks,
1225 	.num_clks = ARRAY_SIZE(rk3588_rga2_clks),
1226 };
1227 
1228 static const struct rga_match_data_t rga3_core0_match_data = {
1229 	.clks = rga3_core_0_clks,
1230 	.num_clks = ARRAY_SIZE(rga3_core_0_clks),
1231 };
1232 
1233 static const struct rga_match_data_t rga3_core1_match_data = {
1234 	.clks = rga3_core_1_clks,
1235 	.num_clks = ARRAY_SIZE(rga3_core_1_clks),
1236 };
1237 
1238 static const struct of_device_id rga3_core0_dt_ids[] = {
1239 	{
1240 	 .compatible = "rockchip,rga3_core0",
1241 	 .data = &rga3_core0_match_data,
1242 	},
1243 	{},
1244 };
1245 
1246 static const struct of_device_id rga3_core1_dt_ids[] = {
1247 	{
1248 	 .compatible = "rockchip,rga3_core1",
1249 	 .data = &rga3_core1_match_data,
1250 	},
1251 	{},
1252 };
1253 
1254 static const struct of_device_id rga2_dt_ids[] = {
1255 	{
1256 	 .compatible = "rockchip,rga2_core0",
1257 	 .data = &rk3588_rga2_match_data,
1258 	},
1259 	{
1260 	 .compatible = "rockchip,rga2",
1261 	 .data = &old_rga2_match_data,
1262 	},
1263 	{},
1264 };
1265 
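/*
 * init_scheduler() - common scheduler bring-up; the ops/core pairing is
 * chosen from the platform-device name ("rga3_core0", "rga3_core1" or
 * "rga2").
 */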
1266 static void init_scheduler(struct rga_scheduler_t *scheduler,
1267 			 const char *name)
1268 {
1269 	spin_lock_init(&scheduler->irq_lock);
1270 	INIT_LIST_HEAD(&scheduler->todo_list);
1271 	init_waitqueue_head(&scheduler->job_done_wq);
1272 
1273 	if (!strcmp(name, "rga3_core0")) {
1274 		scheduler->ops = &rga3_ops;
1275 		/* TODO: get by hw version */
1276 		scheduler->core = RGA3_SCHEDULER_CORE0;
1277 	} else if (!strcmp(name, "rga3_core1")) {
1278 		scheduler->ops = &rga3_ops;
1279 		scheduler->core = RGA3_SCHEDULER_CORE1;
1280 	} else if (!strcmp(name, "rga2")) {
1281 		scheduler->ops = &rga2_ops;
1282 		scheduler->core = RGA2_SCHEDULER_CORE0;
1283 	}
1284 }
1285 
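/*
 * rga_drv_probe() - per-core probe shared by all three platform drivers.
 *
 * Matches the DT entry, allocates and initializes a scheduler, maps the
 * register window, installs the threaded IRQ handler and, unless
 * RGA_DISABLE_PM is defined, acquires clocks and runtime PM long enough to
 * read the hardware version, which selects the per-core capability table
 * (rga3_data, rga2e_*_data) and whether an IOMMU is probed.
 */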
1286 static int rga_drv_probe(struct platform_device *pdev)
1287 {
1288 #ifndef RGA_DISABLE_PM
1289 	int i;
1290 #endif
1291 	int ret = 0;
1292 	int irq;
1293 	struct resource *res;
1294 	const struct rga_match_data_t *match_data;
1295 	const struct of_device_id *match;
1296 	struct rga_scheduler_t *scheduler;
1297 	struct device *dev = &pdev->dev;
1298 	struct rga_drvdata_t *data = rga_drvdata;
1299 
1300 	if (!dev->of_node)
1301 		return -EINVAL;
1302 
1303 	if (!strcmp(dev_driver_string(dev), "rga3_core0"))
1304 		match = of_match_device(rga3_core0_dt_ids, dev);
1305 	else if (!strcmp(dev_driver_string(dev), "rga3_core1"))
1306 		match = of_match_device(rga3_core1_dt_ids, dev);
1307 	else if (!strcmp(dev_driver_string(dev), "rga2"))
1308 		match = of_match_device(rga2_dt_ids, dev);
1309 	else
1310 		match = NULL;
1311 
1312 	if (!match) {
1313 		dev_err(dev, "%s missing DT entry!\n", dev_driver_string(dev));
1314 		return -EINVAL;
1315 	}
1316 
1317 	scheduler = devm_kzalloc(dev, sizeof(struct rga_scheduler_t), GFP_KERNEL);
1318 	if (scheduler == NULL) {
1319 		pr_err("failed to allocate scheduler. dev name = %s\n", dev_driver_string(dev));
1320 		return -ENOMEM;
1321 	}
1322 
1323 	init_scheduler(scheduler, dev_driver_string(dev));
1324 
1325 	scheduler->dev = dev;
1326 
1327 	/* map the registers */
1328 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1329 	if (!res) {
1330 		pr_err("get memory resource failed.\n");
1331 		return -ENXIO;
1332 	}
1333 
1334 	scheduler->rga_base = devm_ioremap(dev, res->start, resource_size(res));
1335 	if (!scheduler->rga_base) {
1336 		pr_err("ioremap failed\n");
1337 		ret = -ENOENT;
1338 		return ret;
1339 	}
1340 
1341 	/* get the IRQ */
1342 	match_data = match->data;
1343 
1344 	/* there are irq names in dts */
1345 	irq = platform_get_irq(pdev, 0);
1346 	if (irq < 0) {
1347 		dev_err(dev, "no irq %s in dts\n", dev_driver_string(dev));
1348 		return irq;
1349 	}
1350 
1351 	scheduler->irq = irq;
1352 
1353 	pr_info("%s, irq = %d, scheduler matched\n", dev_driver_string(dev), irq);
1354 
1355 	ret = devm_request_threaded_irq(dev, irq,
1356 					rga_irq_handler,
1357 					rga_isr_thread,
1358 					IRQF_SHARED,
1359 					dev_driver_string(dev), scheduler);
1360 	if (ret < 0) {
1361 		pr_err("request irq name: %s failed: %d\n", dev_driver_string(dev), ret);
1362 		return ret;
1363 	}
1364 
1365 
1366 #ifndef RGA_DISABLE_PM
1367 	/* clk init */
1368 	for (i = 0; i < match_data->num_clks; i++) {
1369 		struct clk *clk = devm_clk_get(dev, match_data->clks[i]);
1370 
1371 		if (IS_ERR(clk))
1372 			pr_err("failed to get %s\n", match_data->clks[i]);
1373 
1374 		scheduler->clks[i] = clk;
1375 	}
1376 	scheduler->num_clks = match_data->num_clks;
1377 
1378 	/* PM init */
1379 	device_init_wakeup(dev, true);
1380 	pm_runtime_enable(scheduler->dev);
1381 
1382 	ret = pm_runtime_get_sync(scheduler->dev);
1383 	if (ret < 0) {
1384 		pr_err("failed to get pm runtime, ret = %d\n", ret);
1385 		goto pm_disable;
1386 	}
1387 
1388 	for (i = 0; i < scheduler->num_clks; i++) {
1389 		if (!IS_ERR(scheduler->clks[i])) {
1390 			ret = clk_prepare_enable(scheduler->clks[i]);
1391 			if (ret < 0) {
1392 				pr_err("failed to enable clk\n");
1393 				goto pm_disable;
1394 			}
1395 		}
1396 	}
1397 #endif /* #ifndef RGA_DISABLE_PM */
1398 
1399 	scheduler->ops->get_version(scheduler);
1400 	pr_info("%s hardware loaded successfully, hw_version:%s.\n",
1401 		dev_driver_string(dev), scheduler->version.str);
1402 
1403 	/* TODO: get by hw version; currently only the RV1106 can be distinguished. */
1404 	if (scheduler->core == RGA3_SCHEDULER_CORE0 ||
1405 	    scheduler->core == RGA3_SCHEDULER_CORE1) {
1406 		scheduler->data = &rga3_data;
1407 	} else if (scheduler->core == RGA2_SCHEDULER_CORE0) {
1408 		if (!strcmp(scheduler->version.str, "3.3.87975"))
1409 			scheduler->data = &rga2e_1106_data;
1410 		else if (!strcmp(scheduler->version.str, "3.6.92812") ||
1411 			 !strcmp(scheduler->version.str, "3.7.93215"))
1412 			scheduler->data = &rga2e_iommu_data;
1413 		else
1414 			scheduler->data = &rga2e_data;
1415 	}
1416 
1417 	data->scheduler[data->num_of_scheduler] = scheduler;
1418 
1419 	data->num_of_scheduler++;
1420 
1421 #ifndef RGA_DISABLE_PM
1422 	for (i = scheduler->num_clks - 1; i >= 0; i--)
1423 		if (!IS_ERR(scheduler->clks[i]))
1424 			clk_disable_unprepare(scheduler->clks[i]);
1425 
1426 	pm_runtime_put_sync(dev);
1427 #endif /* #ifndef RGA_DISABLE_PM */
1428 
1429 	if (scheduler->data->mmu == RGA_IOMMU) {
1430 		scheduler->iommu_info = rga_iommu_probe(dev);
1431 		if (IS_ERR(scheduler->iommu_info)) {
1432 			dev_err(dev, "failed to attach iommu\n");
1433 			scheduler->iommu_info = NULL;
1434 		}
1435 	}
1436 
1437 	platform_set_drvdata(pdev, scheduler);
1438 
1439 	pr_info("%s probe successfully\n", dev_driver_string(dev));
1440 
1441 	return 0;
1442 
1443 #ifndef RGA_DISABLE_PM
1444 pm_disable:
1445 	device_init_wakeup(dev, false);
1446 	pm_runtime_disable(dev);
1447 #endif /* #ifndef RGA_DISABLE_PM */
1448 
1449 	return ret;
1450 }
1451 
1452 static int rga_drv_remove(struct platform_device *pdev)
1453 {
1454 #ifndef RGA_DISABLE_PM
1455 	device_init_wakeup(&pdev->dev, false);
1456 	pm_runtime_disable(&pdev->dev);
1457 #endif /* #ifndef RGA_DISABLE_PM */
1458 
1459 	return 0;
1460 }
1461 
1462 static struct platform_driver rga3_core0_driver = {
1463 	.probe = rga_drv_probe,
1464 	.remove = rga_drv_remove,
1465 	.driver = {
1466 		 .name = "rga3_core0",
1467 		 .of_match_table = of_match_ptr(rga3_core0_dt_ids),
1468 		 },
1469 };
1470 
1471 static struct platform_driver rga3_core1_driver = {
1472 	.probe = rga_drv_probe,
1473 	.remove = rga_drv_remove,
1474 	.driver = {
1475 		 .name = "rga3_core1",
1476 		 .of_match_table = of_match_ptr(rga3_core1_dt_ids),
1477 		 },
1478 };
1479 
1480 static struct platform_driver rga2_driver = {
1481 	.probe = rga_drv_probe,
1482 	.remove = rga_drv_remove,
1483 	.driver = {
1484 		 .name = "rga2",
1485 		 .of_match_table = of_match_ptr(rga2_dt_ids),
1486 		 },
1487 };
1488 
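/*
 * rga_init() - module entry: registers the rga3 core0/core1 and rga2
 * platform drivers, binds the IOMMU, exposes the /dev/rga misc device, and
 * then brings up the statistics timer and the memory, request and session
 * managers (plus the optional fence context and debugger).
 */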
1489 static int __init rga_init(void)
1490 {
1491 	int ret;
1492 
1493 	rga_drvdata = kzalloc(sizeof(struct rga_drvdata_t), GFP_KERNEL);
1494 	if (rga_drvdata == NULL) {
1495 		pr_err("failed to allocate driver data.\n");
1496 		return -ENOMEM;
1497 	}
1498 
1499 	mutex_init(&rga_drvdata->lock);
1500 
1501 	ret = platform_driver_register(&rga3_core0_driver);
1502 	if (ret != 0) {
1503 		pr_err("Platform device rga3_core0_driver register failed (%d).\n", ret);
1504 		goto err_free_drvdata;
1505 	}
1506 
1507 	ret = platform_driver_register(&rga3_core1_driver);
1508 	if (ret != 0) {
1509 		pr_err("Platform device rga3_core1_driver register failed (%d).\n", ret);
1510 		goto err_unregister_rga3_core0;
1511 	}
1512 
1513 	ret = platform_driver_register(&rga2_driver);
1514 	if (ret != 0) {
1515 		pr_err("Platform device rga2_driver register failed (%d).\n", ret);
1516 		goto err_unregister_rga3_core1;
1517 	}
1518 
1519 	ret = rga_iommu_bind();
1520 	if (ret < 0) {
1521 		pr_err("rga iommu bind failed!\n");
1522 		goto err_unregister_rga2;
1523 	}
1524 
1525 	ret = misc_register(&rga_dev);
1526 	if (ret) {
1527 		pr_err("cannot register miscdev (%d)\n", ret);
1528 		goto err_unbind_iommu;
1529 	}
1530 
1531 	rga_init_timer();
1532 
1533 	rga_mm_init(&rga_drvdata->mm);
1534 
1535 	rga_request_manager_init(&rga_drvdata->pend_request_manager);
1536 
1537 	rga_session_manager_init(&rga_drvdata->session_manager);
1538 
1539 #ifdef CONFIG_ROCKCHIP_RGA_ASYNC
1540 	rga_fence_context_init(&rga_drvdata->fence_ctx);
1541 #endif
1542 
1543 #ifdef CONFIG_ROCKCHIP_RGA_DEBUGGER
1544 	rga_debugger_init(&rga_drvdata->debugger);
1545 #endif
1546 
1547 	pr_info("Module initialized. v%s\n", DRIVER_VERSION);
1548 
1549 	return 0;
1550 
1551 err_unbind_iommu:
1552 	rga_iommu_unbind();
1553 
1554 err_unregister_rga2:
1555 	platform_driver_unregister(&rga2_driver);
1556 
1557 err_unregister_rga3_core1:
1558 	platform_driver_unregister(&rga3_core1_driver);
1559 
1560 err_unregister_rga3_core0:
1561 	platform_driver_unregister(&rga3_core0_driver);
1562 
1563 err_free_drvdata:
1564 	kfree(rga_drvdata);
1565 
1566 	return ret;
1567 }
1568 
1569 static void __exit rga_exit(void)
1570 {
1571 #ifdef CONFIG_ROCKCHIP_RGA_DEBUGGER
1572 	rga_debugger_remove(&rga_drvdata->debugger);
1573 #endif
1574 
1575 #ifdef CONFIG_ROCKCHIP_RGA_ASYNC
1576 	rga_fence_context_remove(&rga_drvdata->fence_ctx);
1577 #endif
1578 
1579 	rga_mm_remove(&rga_drvdata->mm);
1580 
1581 	rga_request_manager_remove(&rga_drvdata->pend_request_manager);
1582 
1583 	rga_session_manager_remove(&rga_drvdata->session_manager);
1584 
1585 	rga_cancel_timer();
1586 
1587 	rga_iommu_unbind();
1588 
1589 	platform_driver_unregister(&rga3_core0_driver);
1590 	platform_driver_unregister(&rga3_core1_driver);
1591 	platform_driver_unregister(&rga2_driver);
1592 
1593 	misc_deregister(&rga_dev);
1594 
1595 	kfree(rga_drvdata);
1596 
1597 	pr_info("Module exited. v%s\n", DRIVER_VERSION);
1598 }
1599 
1600 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
1601 #ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
1602 module_init(rga_init);
1603 #elif defined CONFIG_VIDEO_REVERSE_IMAGE
1604 fs_initcall(rga_init);
1605 #else
1606 late_initcall(rga_init);
1607 #endif
1608 #else
1609 fs_initcall(rga_init);
1610 #endif
1611 module_exit(rga_exit);
1612 
1613 /* Module information */
1614 MODULE_AUTHOR("putin.li@rock-chips.com");
1615 MODULE_DESCRIPTION("Driver for rga device");
1616 MODULE_LICENSE("GPL");
1617 #ifdef MODULE_IMPORT_NS
1618 MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver);
1619 #endif
1620