xref: /OK3568_Linux_fs/kernel/drivers/video/rockchip/rga/rga_drv.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright (C) 2012 ROCKCHIP, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "rga: " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <asm/delay.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
//#include <mach/io.h>
//#include <mach/irqs.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/syscalls.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <asm/cacheflush.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/wakelock.h>
#include <linux/version.h>
#include <linux/debugfs.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
#include <linux/dma-buf.h>
#include <linux/pm_runtime.h>
#endif

#if defined(CONFIG_ION_ROCKCHIP)
#include <linux/rockchip_ion.h>
#endif

#include "rga.h"
#include "rga_reg_info.h"
#include "rga_mmu_info.h"
#include "RGA_API.h"

#define RGA_TEST_CASE 0

#define RGA_TEST_FLUSH_TIME 0
#define RGA_INFO_BUS_ERROR 1

#define RGA_PRE_SCALE_BUF_SIZE (2048 * 2048 * 4)
#define RGA_PRE_SCALE_PAGE_SIZE (RGA_PRE_SCALE_BUF_SIZE >> PAGE_SHIFT)

#define RGA_POWER_OFF_DELAY	(4 * HZ) /* 4s */
#define RGA_TIMEOUT_DELAY	(2 * HZ) /* 2s */

#define RGA_MAJOR		255

#if defined(CONFIG_ARCH_RK2928) || defined(CONFIG_ARCH_RK3026)
#define RK30_RGA_PHYS		RK2928_RGA_PHYS
#define RK30_RGA_SIZE		RK2928_RGA_SIZE
#endif
#define RGA_RESET_TIMEOUT	1000

/* Driver information */
#define DRIVER_DESC		"RGA Device Driver"
#define DRIVER_NAME		"rga"

ktime_t rga_start;
ktime_t rga_end;

static rga_session rga_session_global;

long (*rga_ioctl_kernel_p)(struct rga_req *);

#if RGA_DEBUGFS
unsigned char RGA_TEST_REG;
unsigned char RGA_TEST_MSG;
unsigned char RGA_TEST_TIME;
unsigned char RGA_CHECK_MODE;
unsigned char RGA_NONUSE;
unsigned char RGA_INT_FLAG;
#endif

struct rga_drvdata *rga_drvdata;
rga_service_info rga_service;
struct rga_mmu_buf_t rga_mmu_buf;

#if defined(CONFIG_ION_ROCKCHIP)
extern struct ion_client *rockchip_ion_client_create(const char *name);
#endif

static int rga_blit_async(rga_session *session, struct rga_req *req);
static void rga_del_running_list(void);
static void rga_del_running_list_timeout(void);
static void rga_try_set_reg(void);

/* Logging */
#define RGA_DEBUG 1
#if RGA_DEBUG
#define DBG(format, args...) printk(KERN_DEBUG "%s: " format, DRIVER_NAME, ## args)
#define ERR(format, args...) printk(KERN_ERR "%s: " format, DRIVER_NAME, ## args)
#define WARNING(format, args...) printk(KERN_WARNING "%s: " format, DRIVER_NAME, ## args)
#define INFO(format, args...) printk(KERN_INFO "%s: " format, DRIVER_NAME, ## args)
#else
#define DBG(format, args...)
#define ERR(format, args...)
#define WARNING(format, args...)
#define INFO(format, args...)
#endif

#if RGA_DEBUGFS
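/* Map an RGA ioctl command code to a printable name for debug logging. */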
static const char *rga_get_cmd_mode_str(u32 cmd)
{
	switch (cmd) {
	case RGA_BLIT_SYNC:
		return "RGA_BLIT_SYNC";
	case RGA_BLIT_ASYNC:
		return "RGA_BLIT_ASYNC";
	case RGA_FLUSH:
		return "RGA_FLUSH";
	case RGA_GET_RESULT:
		return "RGA_GET_RESULT";
	case RGA_GET_VERSION:
		return "RGA_GET_VERSION";
	default:
		return "UNF";
	}
}

static const char *rga_get_blend_mode_str(u16 alpha_rop_flag)
{
	if (alpha_rop_flag == 0)
		return "no blend";
	else if (alpha_rop_flag == 0x19)
		return "blend mode 105 src + (1 - src.a) * dst";
	else if (alpha_rop_flag == 0x11)
		return "blend mode 405 src.a * src + (1 - src.a) * dst";
	else
		return "check reg for more information";
}

static const char *rga_get_render_mode_str(u8 mode)
{
	switch (mode & 0x0F) {
	case 0x0:
		return "bitblt";
	case 0x1:
		return "color_palette";
	case 0x2:
		return "color_fill";
	case 0x3:
		return "line_point_drawing";
	case 0x4:
		return "blur_sharp_filter";
	case 0x5:
		return "pre_scaling";
	case 0x6:
		return "update_palette_table";
	case 0x7:
		return "update_patten_buff";
	default:
		return "UNF";
	}
}

static const char *rga_get_rotate_mode_str(struct rga_req *req_rga)
{
	switch (req_rga->rotate_mode) {
	case 0x0:
		return "no rotate";
	case 0x1:
		if (req_rga->sina == 0 && req_rga->cosa == 65536)
			/* rotate 0 */
			return "rotate 0";
		else if (req_rga->sina == 65536 && req_rga->cosa == 0)
			/* rotate 90 */
			return "rotate 90";
		else if (req_rga->sina == 0 && req_rga->cosa == -65536)
			/* rotate 180 */
			return "rotate 180";
		else if (req_rga->sina == -65536 && req_rga->cosa == 0)
			/* rotate 270 */
			return "rotate 270";
		return "UNF";
	case 0x2:
		return "xmirror";
	case 0x3:
		return "ymirror";
	default:
		return "UNF";
	}
}

static bool rga_is_yuv10bit_format(uint32_t format)
{
	bool ret = false;

	switch (format) {
	case RK_FORMAT_YCbCr_420_SP_10B:
	case RK_FORMAT_YCrCb_420_SP_10B:
		ret = true;
		break;
	}
	return ret;
}

static bool rga_is_yuv8bit_format(uint32_t format)
{
	bool ret = false;

	switch (format) {
	case RK_FORMAT_YCbCr_422_SP:
	case RK_FORMAT_YCbCr_422_P:
	case RK_FORMAT_YCbCr_420_SP:
	case RK_FORMAT_YCbCr_420_P:
	case RK_FORMAT_YCrCb_422_SP:
	case RK_FORMAT_YCrCb_422_P:
	case RK_FORMAT_YCrCb_420_SP:
	case RK_FORMAT_YCrCb_420_P:
		ret = true;
		break;
	}
	return ret;
}

static const char *rga_get_format_name(uint32_t format)
{
	switch (format) {
	case RK_FORMAT_RGBA_8888:
		return "RGBA8888";
	case RK_FORMAT_RGBX_8888:
		return "RGBX8888";
	case RK_FORMAT_RGB_888:
		return "RGB888";
	case RK_FORMAT_BGRA_8888:
		return "BGRA8888";
	case RK_FORMAT_RGB_565:
		return "RGB565";
	case RK_FORMAT_RGBA_5551:
		return "RGBA5551";
	case RK_FORMAT_RGBA_4444:
		return "RGBA4444";
	case RK_FORMAT_BGR_888:
		return "BGR888";

	case RK_FORMAT_YCbCr_422_SP:
		return "YCbCr422SP";
	case RK_FORMAT_YCbCr_422_P:
		return "YCbCr422P";
	case RK_FORMAT_YCbCr_420_SP:
		return "YCbCr420SP";
	case RK_FORMAT_YCbCr_420_P:
		return "YCbCr420P";
	case RK_FORMAT_YCrCb_422_SP:
		return "YCrCb422SP";
	case RK_FORMAT_YCrCb_422_P:
		return "YCrCb422P";
	case RK_FORMAT_YCrCb_420_SP:
		return "YCrCb420SP";
	case RK_FORMAT_YCrCb_420_P:
		return "YCrCb420P";

	case RK_FORMAT_BPP1:
		return "BPP1";
	case RK_FORMAT_BPP2:
		return "BPP2";
	case RK_FORMAT_BPP4:
		return "BPP4";
	case RK_FORMAT_BPP8:
		return "BPP8";
	case RK_FORMAT_YCbCr_420_SP_10B:
		return "YCbCr420SP10B";
	case RK_FORMAT_YCrCb_420_SP_10B:
		return "YCrCb420SP10B";
	default:
		return "UNF";
	}
}

static void print_debug_info(struct rga_req *req)
{
	DBG("render_mode %s, rotate_mode %s, blit mode %d\n",
	    rga_get_render_mode_str(req->render_mode),
	    rga_get_rotate_mode_str(req), req->bsfilter_flag);
	DBG("src : y=%lx uv=%lx v=%lx format=%s aw=%d ah=%d vw=%d vh=%d xoff=%d yoff=%d\n",
	    req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
	    rga_get_format_name(req->src.format),
	    req->src.act_w, req->src.act_h, req->src.vir_w, req->src.vir_h,
	    req->src.x_offset, req->src.y_offset);
	DBG("dst : y=%lx uv=%lx v=%lx format=%s aw=%d ah=%d vw=%d vh=%d xoff=%d yoff=%d\n",
	    req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
	    rga_get_format_name(req->dst.format),
	    req->dst.act_w, req->dst.act_h, req->dst.vir_w, req->dst.vir_h,
	    req->dst.x_offset, req->dst.y_offset);
	DBG("mmuflg = %.8x, mmuen is %d\n", req->mmu_info.mmu_flag, req->mmu_info.mmu_en);
	DBG("clip.xmin = %d, clip.xmax = %d, clip.ymin = %d, clip.ymax = %d\n",
	    req->clip.xmin, req->clip.xmax, req->clip.ymin, req->clip.ymax);
	DBG("alpha: flag %.8x mode=%.8x\n", req->alpha_rop_flag, req->alpha_rop_mode);
	DBG("blend mode:%s\n", rga_get_blend_mode_str(req->alpha_rop_flag));
	DBG("yuv2rgb mode:%x\n", req->yuv2rgb_mode);
}

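/*
 * Warn (via DBG) when YUV alignment constraints are violated: 10-bit YUV
 * needs a 16-pixel stride, 8-bit YUV an 8-pixel stride, and offsets and
 * active/virtual sizes must be even. Check-only; always returns 0.
 */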
static int rga_align_check(struct rga_req *req)
{
	if (rga_is_yuv10bit_format(req->src.format)) {
		if ((req->src.vir_w % 16) || (req->src.x_offset % 2) ||
		    (req->src.act_w % 2) || (req->src.y_offset % 2) ||
		    (req->src.act_h % 2) || (req->src.vir_h % 2))
			DBG("err src wstride is not aligned to 16 or yuv not aligned to 2\n");
	}
	if (rga_is_yuv10bit_format(req->dst.format)) {
		if ((req->dst.vir_w % 16) || (req->dst.x_offset % 2) ||
		    (req->dst.act_w % 2) || (req->dst.y_offset % 2) ||
		    (req->dst.act_h % 2) || (req->dst.vir_h % 2))
			DBG("err dst wstride is not aligned to 16 or yuv not aligned to 2\n");
	}
	if (rga_is_yuv8bit_format(req->src.format)) {
		if ((req->src.vir_w % 8) || (req->src.x_offset % 2) ||
		    (req->src.act_w % 2) || (req->src.y_offset % 2) ||
		    (req->src.act_h % 2) || (req->src.vir_h % 2))
			DBG("err src wstride is not aligned to 8 or yuv not aligned to 2\n");
	}
	if (rga_is_yuv8bit_format(req->dst.format)) {
		if ((req->dst.vir_w % 8) || (req->dst.x_offset % 2) ||
		    (req->dst.act_w % 2) || (req->dst.y_offset % 2) ||
		    (req->dst.act_h % 2) || (req->dst.vir_h % 2))
			DBG("err dst wstride is not aligned to 8 or yuv not aligned to 2\n");
	}
	DBG("rga align check over!\n");
	return 0;
}

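/*
 * Probe the last line of an image buffer (read it for fd-backed memory,
 * write it for virtual addresses) so that an undersized buffer faults
 * here, in a debuggable spot, rather than inside the hardware job.
 */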
static int rga_memory_check(void *vaddr, u32 w, u32 h, u32 format, int fd)
{
	int bits = 32;
	int temp_data = 0;
	void *one_line = kzalloc(w * 4, GFP_KERNEL);

	if (!one_line) {
		pr_err("kzalloc fail %s[%d]\n", __func__, __LINE__);
		return 0;
	}

	switch (format) {
	case RK_FORMAT_RGBA_8888:
	case RK_FORMAT_RGBX_8888:
	case RK_FORMAT_BGRA_8888:
		bits = 32;
		break;
	case RK_FORMAT_RGB_888:
	case RK_FORMAT_BGR_888:
		bits = 24;
		break;
	case RK_FORMAT_RGB_565:
	case RK_FORMAT_RGBA_5551:
	case RK_FORMAT_RGBA_4444:
	case RK_FORMAT_YCbCr_422_SP:
	case RK_FORMAT_YCbCr_422_P:
	case RK_FORMAT_YCrCb_422_SP:
	case RK_FORMAT_YCrCb_422_P:
		bits = 16;
		break;
	case RK_FORMAT_YCbCr_420_SP:
	case RK_FORMAT_YCbCr_420_P:
	case RK_FORMAT_YCrCb_420_SP:
	case RK_FORMAT_YCrCb_420_P:
		bits = 12;
		break;
	case RK_FORMAT_YCbCr_420_SP_10B:
	case RK_FORMAT_YCrCb_420_SP_10B:
		bits = 15;
		break;
	default:
		DBG("unknown format\n");
		kfree(one_line);
		return -1;
	}
	temp_data = w * (h - 1) * bits / 8;
	if (fd > 0) {
		DBG("vaddr is %p, bits is %d, fd check\n", vaddr, bits);
		memcpy(one_line, (char *)vaddr + temp_data, w * bits / 8);
		DBG("fd check ok\n");
	} else {
		DBG("vir addr memory check.\n");
		memcpy((void *)((char *)vaddr + temp_data), one_line, w * bits / 8);
		DBG("vir addr check ok.\n");
	}
	kfree(one_line);
	return 0;
}
#endif

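/* MMIO accessors for the RGA register file. */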
static inline void rga_write(u32 b, u32 r)
{
	__raw_writel(b, rga_drvdata->rga_base + r);
}

static inline u32 rga_read(u32 r)
{
	return __raw_readl(rga_drvdata->rga_base + r);
}

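/*
 * Set the soft-reset bit in RGA_SYS_CTRL and poll up to RGA_RESET_TIMEOUT
 * microseconds for the hardware to clear it again.
 */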
static void rga_soft_reset(void)
{
	u32 i;
	u32 reg;

	rga_write(1, RGA_SYS_CTRL);

	for (i = 0; i < RGA_RESET_TIMEOUT; i++) {
		reg = rga_read(RGA_SYS_CTRL) & 1;

		if (reg == 0)
			break;

		udelay(1);
	}

	if (i == RGA_RESET_TIMEOUT)
		ERR("soft reset timeout.\n");
}

static void rga_dump(void)
{
	int running;
	struct rga_reg *reg, *reg_tmp;
	rga_session *session, *session_tmp;

	running = atomic_read(&rga_service.total_running);
	printk("rga total_running %d\n", running);

#if 0
	/* Dump waiting list info */
	if (!list_empty(&rga_service.waiting)) {
		list_head *next;

		next = &rga_service.waiting;

		printk("rga_service dump waiting list\n");

		do {
			reg = list_entry(next->next, struct rga_reg, status_link);
			running = atomic_read(&reg->session->task_running);
			num_done = atomic_read(&reg->session->num_done);
			printk("rga session pid %d, done %d, running %d\n", reg->session->pid, num_done, running);
			next = next->next;
		} while (!list_empty(next));
	}

	/* Dump running list info */
	if (!list_empty(&rga_service.running)) {
		list_head *next;

		printk("rga_service dump running list\n");

		next = &rga_service.running;
		do {
			reg = list_entry(next->next, struct rga_reg, status_link);
			running = atomic_read(&reg->session->task_running);
			num_done = atomic_read(&reg->session->num_done);
			printk("rga session pid %d, done %d, running %d:\n", reg->session->pid, num_done, running);
			next = next->next;
		} while (!list_empty(next));
	}
#endif

	list_for_each_entry_safe(session, session_tmp, &rga_service.session, list_session) {
		printk("session pid %d:\n", session->pid);
		running = atomic_read(&session->task_running);
		printk("task_running %d\n", running);
		list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link) {
			printk("waiting register set 0x%lx\n", (unsigned long)reg);
		}
		list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link) {
			printk("running register set 0x%lx\n", (unsigned long)reg);
		}
	}
}

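/*
 * Re-arm the delayed power-off. system_nrt_wq was removed in later
 * kernels, so the ordinary system_wq is used from 4.4 on.
 */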
static inline void rga_queue_power_off_work(void)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
	queue_delayed_work(system_wq, &rga_drvdata->power_off_work, RGA_POWER_OFF_DELAY);
#else
	queue_delayed_work(system_nrt_wq, &rga_drvdata->power_off_work, RGA_POWER_OFF_DELAY);
#endif
}

/* Caller must hold rga_service.lock */
static void rga_power_on(void)
{
	static ktime_t last;
	ktime_t now = ktime_get();

	if (ktime_to_ns(ktime_sub(now, last)) > NSEC_PER_SEC) {
		cancel_delayed_work_sync(&rga_drvdata->power_off_work);
		rga_queue_power_off_work();
		last = now;
	}
	if (rga_service.enable)
		return;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
	clk_prepare_enable(rga_drvdata->aclk_rga);
	clk_prepare_enable(rga_drvdata->hclk_rga);
	pm_runtime_get_sync(rga_drvdata->dev);
#else
	clk_prepare_enable(rga_drvdata->aclk_rga);
	clk_prepare_enable(rga_drvdata->hclk_rga);
	if (rga_drvdata->pd_rga)
		clk_prepare_enable(rga_drvdata->pd_rga);
#endif

	wake_lock(&rga_drvdata->wake_lock);
	rga_service.enable = true;
}

/* Caller must hold rga_service.lock */
static void rga_power_off(void)
{
	int total_running;

	if (!rga_service.enable)
		return;

	total_running = atomic_read(&rga_service.total_running);
	if (total_running) {
		pr_err("power off when %d tasks running!!\n", total_running);
		mdelay(50);
		pr_err("delay 50 ms for running tasks\n");
		rga_dump();
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
	pm_runtime_put(rga_drvdata->dev);
	clk_disable_unprepare(rga_drvdata->aclk_rga);
	clk_disable_unprepare(rga_drvdata->hclk_rga);
#else
	if (rga_drvdata->pd_rga)
		clk_disable_unprepare(rga_drvdata->pd_rga);
	clk_disable_unprepare(rga_drvdata->aclk_rga);
	clk_disable_unprepare(rga_drvdata->hclk_rga);
#endif
	wake_unlock(&rga_drvdata->wake_lock);
	rga_service.enable = false;
}

static void rga_power_off_work(struct work_struct *work)
{
	if (mutex_trylock(&rga_service.lock)) {
		rga_power_off();
		mutex_unlock(&rga_service.lock);
	} else {
		/* Come back later if the device is busy... */
		rga_queue_power_off_work();
	}
}

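/*
 * RGA_FLUSH: wait up to RGA_TIMEOUT_DELAY for all of this session's queued
 * work to finish; on timeout, tear down the running list and return
 * -ETIMEDOUT.
 */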
static int rga_flush(rga_session *session, unsigned long arg)
{
	int ret = 0;
	int ret_timeout;

#if RGA_TEST_FLUSH_TIME
	ktime_t start;
	ktime_t end;

	start = ktime_get();
#endif

	ret_timeout = wait_event_timeout(session->wait, atomic_read(&session->done), RGA_TIMEOUT_DELAY);

	if (unlikely(ret_timeout < 0)) {
		//pr_err("flush pid %d wait task ret %d\n", session->pid, ret);
		mutex_lock(&rga_service.lock);
		rga_del_running_list();
		mutex_unlock(&rga_service.lock);
		ret = ret_timeout;
	} else if (0 == ret_timeout) {
		//pr_err("flush pid %d wait %d task done timeout\n", session->pid, atomic_read(&session->task_running));
		//printk("bus  = %.8x\n", rga_read(RGA_INT));
		mutex_lock(&rga_service.lock);
		rga_del_running_list_timeout();
		rga_try_set_reg();
		mutex_unlock(&rga_service.lock);
		ret = -ETIMEDOUT;
	}

#if RGA_TEST_FLUSH_TIME
	end = ktime_get();
	end = ktime_sub(end, start);
	printk("one flush wait time %d\n", (int)ktime_to_us(end));
#endif

	return ret;
}

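/* RGA_GET_RESULT: copy the session's completed-task count to user space. */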
static int rga_get_result(rga_session *session, unsigned long arg)
{
	int ret = 0;
	int num_done;

	num_done = atomic_read(&session->num_done);

	if (unlikely(copy_to_user((void __user *)arg, &num_done, sizeof(int)))) {
		printk("copy_to_user failed\n");
		ret = -EFAULT;
	}
	return ret;
}

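/*
 * Validate request geometry: sources up to 8191x8191, destinations up to
 * 2048x2048 active (4096x2048 virtual), and active widths must fit within
 * the virtual stride.
 */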
static int rga_check_param(const struct rga_req *req)
{
	/*
	 * RGA can support up to 8192*8192 resolution in RGB format,
	 * but we limit the image size to 8191*8191 here.
	 */
	/* check src width and height */
	if (!((req->render_mode == color_fill_mode) || (req->render_mode == line_point_drawing_mode))) {
		if (unlikely((req->src.act_w <= 0) || (req->src.act_w > 8191) || (req->src.act_h <= 0) || (req->src.act_h > 8191))) {
			printk("invalid source resolution act_w = %d, act_h = %d\n", req->src.act_w, req->src.act_h);
			return -EINVAL;
		}
	}

	if (!((req->render_mode == color_fill_mode) || (req->render_mode == line_point_drawing_mode))) {
		if (unlikely((req->src.vir_w <= 0) || (req->src.vir_w > 8191) || (req->src.vir_h <= 0) || (req->src.vir_h > 8191))) {
			printk("invalid source resolution vir_w = %d, vir_h = %d\n", req->src.vir_w, req->src.vir_h);
			return -EINVAL;
		}
	}

	/* check dst width and height */
	if (unlikely((req->dst.act_w <= 0) || (req->dst.act_w > 2048) || (req->dst.act_h <= 0) || (req->dst.act_h > 2048))) {
		printk("invalid destination resolution act_w = %d, act_h = %d\n", req->dst.act_w, req->dst.act_h);
		return -EINVAL;
	}

	if (unlikely((req->dst.vir_w <= 0) || (req->dst.vir_w > 4096) || (req->dst.vir_h <= 0) || (req->dst.vir_h > 2048))) {
		printk("invalid destination resolution vir_w = %d, vir_h = %d\n", req->dst.vir_w, req->dst.vir_h);
		return -EINVAL;
	}

	/* check src_vir_w */
	if (unlikely(req->src.vir_w < req->src.act_w)) {
		printk("invalid src_vir_w act_w = %d, vir_w = %d\n", req->src.act_w, req->src.vir_w);
		return -EINVAL;
	}

	/* check dst_vir_w */
	if (unlikely(req->dst.vir_w < req->dst.act_w)) {
		if (req->rotate_mode != 1) {
			printk("invalid dst_vir_w act_w = %d, vir_w = %d\n", req->dst.act_w, req->dst.vir_w);
			return -EINVAL;
		}
	}

	return 0;
}

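/*
 * Copy one 32-word command list into slot 'offset' of the shared command
 * buffer and bump the per-session and global task counters.
 */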
static void rga_copy_reg(struct rga_reg *reg, uint32_t offset)
{
	uint32_t i;
	uint32_t *cmd_buf;
	uint32_t *reg_p;

	if (atomic_read(&reg->session->task_running) != 0)
		printk(KERN_ERR "task_running is not zero\n");

	atomic_add(1, &rga_service.cmd_num);
	atomic_add(1, &reg->session->task_running);

	cmd_buf = (uint32_t *)rga_service.cmd_buff + offset * 32;
	reg_p = (uint32_t *)reg->cmd_reg;

	for (i = 0; i < 32; i++)
		cmd_buf[i] = reg_p[i];
}

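/*
 * Build a register set for a single request: fill in MMU info when enabled,
 * generate the command list, and queue it on the service and session
 * waiting lists.
 */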
static struct rga_reg *rga_reg_init(rga_session *session, struct rga_req *req)
{
	int32_t ret;
	struct rga_reg *reg = kzalloc(sizeof(struct rga_reg), GFP_KERNEL);

	if (NULL == reg) {
		pr_err("kzalloc fail in rga_reg_init\n");
		return NULL;
	}

	reg->session = session;
	INIT_LIST_HEAD(&reg->session_link);
	INIT_LIST_HEAD(&reg->status_link);

	reg->MMU_base = NULL;

	if (req->mmu_info.mmu_en) {
		ret = rga_set_mmu_info(reg, req);
		if (ret < 0) {
			printk("%s, [%d] set mmu info error\n", __FUNCTION__, __LINE__);
			kfree(reg);
			return NULL;
		}
	}

	if (RGA_gen_reg_info(req, (uint8_t *)reg->cmd_reg) == -1) {
		printk("gen reg info error\n");
		kfree(reg);
		return NULL;
	}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
	reg->sg_src = req->sg_src;
	reg->sg_dst = req->sg_dst;
	reg->attach_src = req->attach_src;
	reg->attach_dst = req->attach_dst;
#endif

	mutex_lock(&rga_service.lock);
	list_add_tail(&reg->status_link, &rga_service.waiting);
	list_add_tail(&reg->session_link, &session->waiting);
	mutex_unlock(&rga_service.lock);

	return reg;
}

/* Caller must hold rga_service.lock */
static void rga_reg_deinit(struct rga_reg *reg)
{
	list_del_init(&reg->session_link);
	list_del_init(&reg->status_link);
	kfree(reg);
}

/* Caller must hold rga_service.lock */
static void rga_reg_from_wait_to_run(struct rga_reg *reg)
{
	list_del_init(&reg->status_link);
	list_add_tail(&reg->status_link, &rga_service.running);

	list_del_init(&reg->session_link);
	list_add_tail(&reg->session_link, &reg->session->running);
}

/* Caller must hold rga_service.lock */
static void rga_service_session_clear(rga_session *session)
{
	struct rga_reg *reg, *n;

	list_for_each_entry_safe(reg, n, &session->waiting, session_link) {
		rga_reg_deinit(reg);
	}

	list_for_each_entry_safe(reg, n, &session->running, session_link) {
		rga_reg_deinit(reg);
	}
}

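/*
 * If the hardware is idle and work is pending, power on, copy the head of
 * the waiting list into the command buffer, flush it for DMA, and kick the
 * engine in master mode with the completion interrupts enabled.
 */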
/* Caller must hold rga_service.lock */
static void rga_try_set_reg(void)
{
	struct rga_reg *reg;

	if (list_empty(&rga_service.running)) {
		if (!list_empty(&rga_service.waiting)) {
			/* RGA is idle */
			reg = list_entry(rga_service.waiting.next, struct rga_reg, status_link);

			rga_power_on();
			udelay(1);

			rga_copy_reg(reg, 0);
			rga_reg_from_wait_to_run(reg);
			rga_dma_flush_range(&rga_service.cmd_buff[0], &rga_service.cmd_buff[32]);

			rga_soft_reset();

			rga_write(0x0, RGA_SYS_CTRL);
			rga_write(0, RGA_MMU_CTRL);

			/* CMD buff */
			rga_write(virt_to_phys(rga_service.cmd_buff), RGA_CMD_ADDR);

#if RGA_DEBUGFS
			if (RGA_TEST_REG) {
				//printk(KERN_DEBUG "cmd_addr = %.8x\n", rga_read(RGA_CMD_ADDR));
				uint32_t i;
				uint32_t *p;

				p = rga_service.cmd_buff;
				printk("CMD_REG\n");
				for (i = 0; i < 7; i++)
					printk("%.8x %.8x %.8x %.8x\n", p[0 + i * 4], p[1 + i * 4], p[2 + i * 4], p[3 + i * 4]);
				printk("%.8x %.8x\n", p[0 + i * 4], p[1 + i * 4]);
			}
#endif

			/* master mode */
			rga_write((0x1 << 2) | (0x1 << 3), RGA_SYS_CTRL);

			/* All CMD finish int */
			rga_write(rga_read(RGA_INT) | (0x1 << 10) | (0x1 << 9) | (0x1 << 8), RGA_INT);

#if RGA_DEBUGFS
			if (RGA_TEST_TIME)
				rga_start = ktime_get();
#endif

			/* Start proc */
			atomic_set(&reg->session->done, 0);
			rga_write(0x1, RGA_CMD_CTRL);

#if RGA_DEBUGFS
			if (RGA_TEST_REG) {
				uint32_t i;

				printk("CMD_READ_BACK_REG\n");
				for (i = 0; i < 7; i++)
					printk("%.8x %.8x %.8x %.8x\n", rga_read(0x100 + i * 16 + 0),
					       rga_read(0x100 + i * 16 + 4), rga_read(0x100 + i * 16 + 8), rga_read(0x100 + i * 16 + 12));
				printk("%.8x %.8x\n", rga_read(0x100 + i * 16 + 0), rga_read(0x100 + i * 16 + 4));
			}
#endif
		}
	}
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
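/*
 * Drop the src/dst dma-buf mappings and attachments recorded either in the
 * request (before a register set exists) or in the committed register set.
 */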
static int rga_put_dma_buf(struct rga_req *req, struct rga_reg *reg)
{
	struct dma_buf_attachment *attach = NULL;
	struct sg_table *sgt = NULL;
	struct dma_buf *dma_buf = NULL;

	if (!req && !reg)
		return -EINVAL;

	attach = (!reg) ? req->attach_src : reg->attach_src;
	sgt = (!reg) ? req->sg_src : reg->sg_src;
	if (attach && sgt)
		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	if (attach) {
		dma_buf = attach->dmabuf;
		dma_buf_detach(dma_buf, attach);
		dma_buf_put(dma_buf);
	}

	attach = (!reg) ? req->attach_dst : reg->attach_dst;
	sgt = (!reg) ? req->sg_dst : reg->sg_dst;
	if (attach && sgt)
		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	if (attach) {
		dma_buf = attach->dmabuf;
		dma_buf_detach(dma_buf, attach);
		dma_buf_put(dma_buf);
	}

	return 0;
}
#endif
/* Caller must hold rga_service.lock */
static void rga_del_running_list(void)
{
	struct rga_reg *reg;

	while (!list_empty(&rga_service.running)) {
		reg = list_entry(rga_service.running.next, struct rga_reg, status_link);

		if (reg->MMU_len != 0) {
			if (rga_mmu_buf.back + reg->MMU_len > 2 * rga_mmu_buf.size)
				rga_mmu_buf.back = reg->MMU_len + rga_mmu_buf.size;
			else
				rga_mmu_buf.back += reg->MMU_len;
		}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
		rga_put_dma_buf(NULL, reg);
#endif

		atomic_sub(1, &reg->session->task_running);
		atomic_sub(1, &rga_service.total_running);

		if (list_empty(&reg->session->waiting)) {
			atomic_set(&reg->session->done, 1);
			wake_up(&reg->session->wait);
		}

		rga_reg_deinit(reg);
	}
}

945 /* Caller must hold rga_service.lock */
rga_del_running_list_timeout(void)946 static void rga_del_running_list_timeout(void)
947 {
948     struct rga_reg *reg;
949 
950     while(!list_empty(&rga_service.running))
951     {
952         reg = list_entry(rga_service.running.next, struct rga_reg, status_link);
953 
954         if(reg->MMU_len != 0)
955         {
956             if (rga_mmu_buf.back + reg->MMU_len > 2*rga_mmu_buf.size)
957                 rga_mmu_buf.back = reg->MMU_len + rga_mmu_buf.size;
958             else
959                 rga_mmu_buf.back += reg->MMU_len;
960         }
961 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
962 		rga_put_dma_buf(NULL, reg);
963 #endif
964         atomic_sub(1, &reg->session->task_running);
965         atomic_sub(1, &rga_service.total_running);
966 
967         //printk("RGA soft reset for timeout process\n");
968         rga_soft_reset();
969 
970 
971         #if 0
972         printk("RGA_INT is %.8x\n", rga_read(RGA_INT));
973         printk("reg->session->task_running = %d\n", atomic_read(&reg->session->task_running));
974         printk("rga_service.total_running  = %d\n", atomic_read(&rga_service.total_running));
975 
976         print_info(&reg->req);
977 
978         {
979             uint32_t *p, i;
980             p = reg->cmd_reg;
981             for (i=0; i<7; i++)
982                 printk("%.8x %.8x %.8x %.8x\n", p[0 + i*4], p[1+i*4], p[2 + i*4], p[3 + i*4]);
983 
984         }
985         #endif
986 
987         if(list_empty(&reg->session->waiting))
988         {
989             atomic_set(&reg->session->done, 1);
990             wake_up(&reg->session->wait);
991         }
992 
993         rga_reg_deinit(reg);
994     }
995 }
996 
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
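/*
 * Pre-4.4 path: resolve src/dst through ION. A buffer handed in via
 * yrgb_addr is imported and replaced by its sg table (IOMMU case) or its
 * physical address; plain virtual addresses are split into Y/UV/V planes
 * using vir_w * vir_h.
 */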
static int rga_convert_dma_buf(struct rga_req *req)
{
	struct ion_handle *hdl;
	ion_phys_addr_t phy_addr;
	size_t len;
	int ret;
	u32 src_offset, dst_offset;
	void *vaddr;

	req->sg_src = NULL;
	req->sg_dst = NULL;

	src_offset = req->line_draw_info.flag;
	dst_offset = req->line_draw_info.line_width;

	if (req->src.yrgb_addr) {
		hdl = ion_import_dma_buf(rga_drvdata->ion_client, req->src.yrgb_addr);
		if (IS_ERR(hdl)) {
			ret = PTR_ERR(hdl);
			pr_err("RGA ERROR ion buf handle\n");
			return ret;
		}

		if (req->src.uv_addr) {
#if RGA_DEBUGFS
			if (RGA_TEST_MSG)
				pr_err("WARNING: do not pass virtual addresses when an fd is already given!\n");
#endif
			req->src.uv_addr = 0;
		}

#if RGA_DEBUGFS
		if (RGA_CHECK_MODE) {
			vaddr = ion_map_kernel(rga_drvdata->ion_client, hdl);
			if (vaddr)
				rga_memory_check(vaddr, req->src.vir_h, req->src.vir_w,
						 req->src.format, req->src.yrgb_addr);
			ion_unmap_kernel(rga_drvdata->ion_client, hdl);
		}
#endif
		if ((req->mmu_info.mmu_flag >> 8) & 1) {
			req->sg_src = ion_sg_table(rga_drvdata->ion_client, hdl);
			req->src.yrgb_addr = req->src.uv_addr;
			req->src.uv_addr = req->src.yrgb_addr + (req->src.vir_w * req->src.vir_h);
			req->src.v_addr = req->src.uv_addr + (req->src.vir_w * req->src.vir_h) / 4;
		} else {
			ion_phys(rga_drvdata->ion_client, hdl, &phy_addr, &len);
			req->src.yrgb_addr = phy_addr + src_offset;
			req->src.uv_addr = req->src.yrgb_addr + (req->src.vir_w * req->src.vir_h);
			req->src.v_addr = req->src.uv_addr + (req->src.vir_w * req->src.vir_h) / 4;
		}
		ion_free(rga_drvdata->ion_client, hdl);
	} else {
		req->src.yrgb_addr = req->src.uv_addr;
		req->src.uv_addr = req->src.yrgb_addr + (req->src.vir_w * req->src.vir_h);
		req->src.v_addr = req->src.uv_addr + (req->src.vir_w * req->src.vir_h) / 4;
	}

	if (req->dst.yrgb_addr) {
		hdl = ion_import_dma_buf(rga_drvdata->ion_client, req->dst.yrgb_addr);
		if (IS_ERR(hdl)) {
			ret = PTR_ERR(hdl);
			printk("RGA2 ERROR ion buf handle\n");
			return ret;
		}

		if (req->dst.uv_addr) {
#if RGA_DEBUGFS
			if (RGA_TEST_MSG)
				pr_err("WARNING: do not pass virtual addresses when an fd is already given!\n");
#endif
			req->dst.uv_addr = 0;
		}

#if RGA_DEBUGFS
		if (RGA_CHECK_MODE) {
			vaddr = ion_map_kernel(rga_drvdata->ion_client, hdl);
			if (vaddr)
				rga_memory_check(vaddr, req->dst.vir_h, req->dst.vir_w,
						 req->dst.format, req->dst.yrgb_addr);
			ion_unmap_kernel(rga_drvdata->ion_client, hdl);
		}
#endif
		if ((req->mmu_info.mmu_flag >> 10) & 1) {
			req->sg_dst = ion_sg_table(rga_drvdata->ion_client, hdl);
			req->dst.yrgb_addr = req->dst.uv_addr;
			req->dst.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);
			req->dst.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h) / 4;
		} else {
			ion_phys(rga_drvdata->ion_client, hdl, &phy_addr, &len);
			req->dst.yrgb_addr = phy_addr + dst_offset;
			req->dst.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);
			req->dst.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h) / 4;
		}
		ion_free(rga_drvdata->ion_client, hdl);
	} else {
		req->dst.yrgb_addr = req->dst.uv_addr;
		req->dst.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);
		req->dst.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h) / 4;
	}

	return 0;
}
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
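/*
 * 4.4+ path: a positive yrgb_addr is treated as a dma-buf fd and run
 * through get/attach/map, handing the sg table back through *psgt;
 * otherwise the user virtual address is split into Y/UV/V planes.
 */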
static int rga_get_img_info(rga_img_info_t *img,
			    u8 mmu_flag,
			    struct sg_table **psgt,
			    struct dma_buf_attachment **pattach)
{
	struct dma_buf_attachment *attach = NULL;
	struct device *rga_dev = NULL;
	struct sg_table *sgt = NULL;
	struct dma_buf *dma_buf = NULL;
	u32 vir_w, vir_h;
	int yrgb_addr = -1;
	int ret = 0;
	void *vaddr = NULL;

	rga_dev = rga_drvdata->dev;
	yrgb_addr = (int)img->yrgb_addr;
	vir_w = img->vir_w;
	vir_h = img->vir_h;

	if (yrgb_addr > 0) {
		dma_buf = dma_buf_get(img->yrgb_addr);
		if (IS_ERR(dma_buf)) {
			ret = -EINVAL;
			pr_err("dma_buf_get fail fd[%d]\n", yrgb_addr);
			return ret;
		}

		attach = dma_buf_attach(dma_buf, rga_dev);
		if (IS_ERR(attach)) {
			dma_buf_put(dma_buf);
			ret = -EINVAL;
			pr_err("Failed to attach dma_buf\n");
			return ret;
		}
#if RGA_DEBUGFS
		if (RGA_CHECK_MODE) {
			vaddr = dma_buf_vmap(dma_buf);
			if (vaddr)
				rga_memory_check(vaddr, img->vir_w, img->vir_h,
						 img->format, img->yrgb_addr);
			dma_buf_vunmap(dma_buf, vaddr);
		}
#endif
		*pattach = attach;
		sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
		if (IS_ERR(sgt)) {
			sgt = NULL;
			ret = -EINVAL;
			pr_err("Failed to map src attachment\n");
			goto err_get_sg;
		}
		if (!mmu_flag) {
			ret = -EINVAL;
			pr_err("Fix it please enable iommu flag\n");
			goto err_get_sg;
		}

		*psgt = sgt;
		img->yrgb_addr = img->uv_addr;
		img->uv_addr = img->yrgb_addr + (vir_w * vir_h);
		img->v_addr = img->uv_addr + (vir_w * vir_h) / 4;
	} else {
		img->yrgb_addr = img->uv_addr;
		img->uv_addr = img->yrgb_addr + (vir_w * vir_h);
		img->v_addr = img->uv_addr + (vir_w * vir_h) / 4;
	}

	return ret;

err_get_sg:
	if (sgt)
		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	if (attach) {
		dma_buf = attach->dmabuf;
		dma_buf_detach(dma_buf, attach);
		*pattach = NULL;
		dma_buf_put(dma_buf);
	}
	return ret;
}

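/*
 * Import the src and dst dma-bufs for one request; if the dst import fails,
 * the already-mapped src is released again.
 */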
static int rga_get_dma_buf(struct rga_req *req)
{
	struct dma_buf *dma_buf = NULL;
	u8 mmu_flag = 0;
	int ret = 0;

	req->sg_src = NULL;
	req->sg_dst = NULL;
	req->attach_src = NULL;
	req->attach_dst = NULL;
	mmu_flag = (req->mmu_info.mmu_flag >> 8) & 1;
	ret = rga_get_img_info(&req->src, mmu_flag, &req->sg_src,
			       &req->attach_src);
	if (ret) {
		pr_err("src:rga_get_img_info fail\n");
		goto err_src;
	}

	mmu_flag = (req->mmu_info.mmu_flag >> 10) & 1;
	ret = rga_get_img_info(&req->dst, mmu_flag, &req->sg_dst,
			       &req->attach_dst);
	if (ret) {
		pr_err("dst:rga_get_img_info fail\n");
		goto err_dst;
	}

	return ret;

err_dst:
	if (req->sg_src && req->attach_src) {
		dma_buf_unmap_attachment(req->attach_src,
					 req->sg_src, DMA_BIDIRECTIONAL);
		dma_buf = req->attach_src->dmabuf;
		dma_buf_detach(dma_buf, req->attach_src);
		dma_buf_put(dma_buf);
	}
err_src:

	return ret;
}
#endif
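
/*
 * Build and queue two register sets for the two-pass pre-scale path
 * (req0 then req1); req0 drops its dst MMU flag (bit 10) and req1 its src
 * MMU flag (bit 8). Returns the second register set, or NULL on failure.
 */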
static struct rga_reg *rga_reg_init_2(rga_session *session, struct rga_req *req0,
				      struct rga_req *req1)
{
	int32_t ret;
	struct rga_reg *reg0, *reg1;

	reg0 = NULL;
	reg1 = NULL;

	do {
		reg0 = kzalloc(sizeof(*reg0), GFP_KERNEL);
		if (!reg0) {
			pr_err("%s [%d] kzalloc fail in rga_reg_init_2\n",
			       __func__, __LINE__);
			break;
		}

		reg1 = kzalloc(sizeof(*reg1), GFP_KERNEL);
		if (!reg1) {
			pr_err("%s [%d] kzalloc fail in rga_reg_init_2\n",
			       __func__, __LINE__);
			break;
		}

		reg0->session = session;
		INIT_LIST_HEAD(&reg0->session_link);
		INIT_LIST_HEAD(&reg0->status_link);

		reg1->session = session;
		INIT_LIST_HEAD(&reg1->session_link);
		INIT_LIST_HEAD(&reg1->status_link);

		req0->mmu_info.mmu_flag &= (~(1 << 10));
		if (req0->mmu_info.mmu_en) {
			ret = rga_set_mmu_info(reg0, req0);
			if (ret < 0) {
				pr_err("%s, [%d] set mmu info error\n",
				       __func__, __LINE__);
				break;
			}
		}

		RGA_gen_reg_info(req0, (uint8_t *)reg0->cmd_reg);
		req1->mmu_info.mmu_flag &= (~(1 << 8));
		if (req1->mmu_info.mmu_en) {
			ret = rga_set_mmu_info(reg1, req1);
			if (ret < 0) {
				pr_err("%s, [%d] set mmu info error\n",
				       __func__, __LINE__);
				break;
			}
		}
		RGA_gen_reg_info(req1, (uint8_t *)reg1->cmd_reg);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
		reg1->sg_src = req1->sg_src;
		reg1->sg_dst = req1->sg_dst;
		reg1->attach_src = req1->attach_src;
		reg1->attach_dst = req1->attach_dst;
#endif

		mutex_lock(&rga_service.lock);
		list_add_tail(&reg0->status_link, &rga_service.waiting);
		list_add_tail(&reg0->session_link, &session->waiting);
		list_add_tail(&reg1->status_link, &rga_service.waiting);
		list_add_tail(&reg1->session_link, &session->waiting);
		mutex_unlock(&rga_service.lock);

		return reg1;
	} while (0);

	kfree(reg0);
	kfree(reg1);
	return NULL;
}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
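/*
 * Legacy address fix-up: for YCbCr_420_SP jobs whose addresses all lie
 * above 0xc0000000, convert to the physical alias by subtracting
 * 0x60000000 and bypass the MMU.
 */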
static void rga_mem_addr_sel(struct rga_req *req)
{
	switch (req->src.format) {
	case RK_FORMAT_YCbCr_420_SP:
		if ((req->src.yrgb_addr > 0xc0000000) && (req->src.uv_addr > 0xc0000000) &&
		    (req->dst.yrgb_addr > 0xc0000000)) {
			req->src.yrgb_addr = req->src.yrgb_addr - 0x60000000;
			req->src.uv_addr = req->src.uv_addr - 0x60000000;
			req->dst.yrgb_addr = req->dst.yrgb_addr - 0x60000000;
			req->mmu_info.mmu_en = 0;
			req->mmu_info.mmu_flag &= 0xfffe;
		}
		break;
	case RK_FORMAT_YCbCr_422_SP:
	case RK_FORMAT_YCbCr_422_P:
	case RK_FORMAT_YCbCr_420_P:
	case RK_FORMAT_YCrCb_422_SP:
	case RK_FORMAT_YCrCb_422_P:
	case RK_FORMAT_YCrCb_420_SP:
	case RK_FORMAT_YCrCb_420_P:
	default:
		break;
	}
}
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
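/*
 * Core submit path (4.4+): import dma-bufs, then queue either one register
 * set or, when downscaling by half or more in either axis, two pre-scale
 * passes. Scaling beyond 1/8 or 8x is rejected.
 */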
static int rga_blit(rga_session *session, struct rga_req *req)
{
	int ret = -1;
	int num = 0;
	struct rga_reg *reg;
	struct rga_req req2;
	uint32_t saw, sah, daw, dah;

	saw = req->src.act_w;
	sah = req->src.act_h;
	daw = req->dst.act_w;
	dah = req->dst.act_h;

#if RGA_DEBUGFS
	if (RGA_TEST_MSG)
		print_debug_info(req);
	if (RGA_CHECK_MODE) {
		rga_align_check(req);
		/*rga_scale_check(req);*/
	}
#endif
	if (rga_get_dma_buf(req)) {
		pr_err("RGA : DMA buf copy error\n");
		return -EFAULT;
	}
	req->render_mode &= (~RGA_BUF_GEM_TYPE_MASK);

	if ((req->render_mode == bitblt_mode) && (((saw >> 1) >= daw) || ((sah >> 1) >= dah))) {
		/* generate 2 cmd for pre scale */
		if (((saw >> 3) > daw) || ((sah >> 3) > dah)) {
			pr_err("unsupported to scale to less than 1/8\n");
			goto err_put_dma_buf;
		}
		if (((daw >> 3) > saw) || ((dah >> 3) > sah)) {
			pr_err("unsupported to scale to more than 8x\n");
			goto err_put_dma_buf;
		}
		ret = rga_check_param(req);
		if (ret == -EINVAL) {
			pr_err("req 0 argument is invalid\n");
			goto err_put_dma_buf;
		}

		ret = RGA_gen_two_pro(req, &req2);
		if (ret == -EINVAL) {
			pr_err("RGA_gen_two_pro err\n");
			goto err_put_dma_buf;
		}

		ret = rga_check_param(req);
		if (ret == -EINVAL) {
			pr_err("req 1 argument is invalid\n");
			goto err_put_dma_buf;
		}

		ret = rga_check_param(&req2);
		if (ret == -EINVAL) {
			pr_err("req 2 argument is invalid\n");
			goto err_put_dma_buf;
		}

		reg = rga_reg_init_2(session, req, &req2);
		if (!reg) {
			pr_err("init2 reg fail\n");
			goto err_put_dma_buf;
		}
		num = 2;
	} else {
		/* check if the values are legal */
		ret = rga_check_param(req);
		if (ret == -EINVAL) {
			pr_err("req argument is invalid\n");
			goto err_put_dma_buf;
		}

		reg = rga_reg_init(session, req);
		if (!reg) {
			pr_err("init reg fail\n");
			goto err_put_dma_buf;
		}

		num = 1;
	}

	mutex_lock(&rga_service.lock);
	atomic_add(num, &rga_service.total_running);
	rga_try_set_reg();
	mutex_unlock(&rga_service.lock);
	return 0;

err_put_dma_buf:
	rga_put_dma_buf(req, NULL);

	return -EFAULT;
}
#else
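/*
 * Pre-4.4 variant of the submit path, using the ION conversion helper and
 * the legacy address fix-up instead of dma-buf import.
 */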
static int rga_blit(rga_session *session, struct rga_req *req)
{
	int ret = -1;
	int num = 0;
	struct rga_reg *reg;
	struct rga_req req2;
	uint32_t saw, sah, daw, dah;

	saw = req->src.act_w;
	sah = req->src.act_h;
	daw = req->dst.act_w;
	dah = req->dst.act_h;

#if RGA_DEBUGFS
	if (RGA_TEST_MSG)
		print_debug_info(req);
	if (RGA_CHECK_MODE) {
		rga_align_check(req);
		/*rga_scale_check(req);*/
	}
#endif
	if (rga_convert_dma_buf(req)) {
		pr_err("RGA : DMA buf copy error\n");
		return -EFAULT;
	}
	do {
		if ((req->render_mode == bitblt_mode) && (((saw >> 1) >= daw) || ((sah >> 1) >= dah))) {
			/* generate 2 cmd for pre scale */
			ret = rga_check_param(req);
			if (ret == -EINVAL) {
				pr_err("req 0 argument is invalid\n");
				break;
			}

			ret = RGA_gen_two_pro(req, &req2);
			if (ret == -EINVAL)
				break;

			ret = rga_check_param(req);
			if (ret == -EINVAL) {
				pr_err("req 1 argument is invalid\n");
				break;
			}

			ret = rga_check_param(&req2);
			if (ret == -EINVAL) {
				pr_err("req 2 argument is invalid\n");
				break;
			}

			reg = rga_reg_init_2(session, req, &req2);
			if (!reg)
				break;
			num = 2;
		} else {
			/* check if the values are legal */
			ret = rga_check_param(req);
			if (ret == -EINVAL) {
				pr_err("req argument is invalid\n");
				break;
			}

			if (req->render_mode == bitblt_mode)
				rga_mem_addr_sel(req);

			reg = rga_reg_init(session, req);
			if (!reg)
				break;
			num = 1;
		}

		mutex_lock(&rga_service.lock);
		atomic_add(num, &rga_service.total_running);
		rga_try_set_reg();
		mutex_unlock(&rga_service.lock);

		return 0;
	} while (0);

	return -EFAULT;
}
#endif

static int rga_blit_async(rga_session *session, struct rga_req *req)
{
	int ret = -1;

#if RGA_DEBUGFS
	if (RGA_TEST_MSG)
		DBG("*** rga_blit_async proc ***\n");
#endif
	atomic_set(&session->done, 0);
	ret = rga_blit(session, req);
	return ret;
}

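/*
 * Submit like rga_blit_async, then block until the session signals done or
 * RGA_TIMEOUT_DELAY expires.
 */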
static int rga_blit_sync(rga_session *session, struct rga_req *req)
{
	int ret = -1;
	int ret_timeout = 0;

#if RGA_DEBUGFS
	if (RGA_TEST_MSG)
		DBG("*** rga_blit_sync proc ***\n");
#endif

	atomic_set(&session->done, 0);
	ret = rga_blit(session, req);
	if (ret < 0)
		return ret;

	ret_timeout = wait_event_timeout(session->wait, atomic_read(&session->done), RGA_TIMEOUT_DELAY);

	if (unlikely(ret_timeout < 0)) {
		mutex_lock(&rga_service.lock);
		rga_del_running_list();
		mutex_unlock(&rga_service.lock);
		ret = ret_timeout;
	} else if (0 == ret_timeout) {
		mutex_lock(&rga_service.lock);
		rga_del_running_list_timeout();
		rga_try_set_reg();
		mutex_unlock(&rga_service.lock);
		ret = -ETIMEDOUT;
	}

#if RGA_DEBUGFS
	if (RGA_TEST_TIME) {
		rga_end = ktime_get();
		rga_end = ktime_sub(rga_end, rga_start);
		DBG("sync one cmd end time %d us\n", (int)ktime_to_us(rga_end));
	}
#endif

	return ret;
}

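/*
 * Main ioctl dispatcher, serialized by rga_service.mutex. A minimal
 * user-space call sequence looks roughly like this (a sketch; which
 * rga_req fields must be filled depends on rga.h and the use case):
 *
 *	int fd = open("/dev/rga", O_RDWR);	// misc device "rga"
 *	struct rga_req req = {0};
 *	// ... fill req.src / req.dst / req.render_mode ...
 *	ioctl(fd, RGA_BLIT_SYNC, &req);		// blocks until the blit completes
 */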
static long rga_ioctl(struct file *file, uint32_t cmd, unsigned long arg)
{
	struct rga_req req;
	int ret = 0;
	rga_session *session;

	memset(&req, 0x0, sizeof(req));
	mutex_lock(&rga_service.mutex);

	session = (rga_session *)file->private_data;

	if (NULL == session) {
		printk("%s [%d] rga thread session is null\n", __FUNCTION__, __LINE__);
		mutex_unlock(&rga_service.mutex);
		return -EINVAL;
	}

#if RGA_DEBUGFS
	if (RGA_TEST_MSG)
		DBG("cmd is %s(0x%x)\n", rga_get_cmd_mode_str(cmd), cmd);
	if (RGA_NONUSE) {
		mutex_unlock(&rga_service.mutex);
		return 0;
	}
#endif
	switch (cmd) {
	case RGA_BLIT_SYNC:
		if (unlikely(copy_from_user(&req, (struct rga_req *)arg, sizeof(struct rga_req)))) {
			ERR("copy_from_user failed\n");
			ret = -EFAULT;
			break;
		}
		ret = rga_blit_sync(session, &req);
		break;
	case RGA_BLIT_ASYNC:
		if (unlikely(copy_from_user(&req, (struct rga_req *)arg, sizeof(struct rga_req)))) {
			ERR("copy_from_user failed\n");
			ret = -EFAULT;
			break;
		}

		if (atomic_read(&rga_service.total_running) > 16)
			ret = rga_blit_sync(session, &req);
		else
			ret = rga_blit_async(session, &req);
		break;
	case RGA_FLUSH:
		ret = rga_flush(session, arg);
		break;
	case RGA_GET_RESULT:
		ret = rga_get_result(session, arg);
		break;
	case RGA_GET_VERSION:
		if (!rga_drvdata->version) {
			rga_drvdata->version = kzalloc(16, GFP_KERNEL);
			if (!rga_drvdata->version) {
				ret = -ENOMEM;
				break;
			}
			rga_power_on();
			udelay(1);
			if (rga_read(RGA_VERSION) == 0x02018632)
				snprintf(rga_drvdata->version, 16, "1.6");
			else
				snprintf(rga_drvdata->version, 16, "1.003");
		}

		if (copy_to_user((void __user *)arg, rga_drvdata->version, 16))
			ret = -EFAULT;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&rga_service.mutex);

	return ret;
}

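/*
 * In-kernel entry point: forwards to rga_ioctl_kernel_p, which probe wires
 * to rga_ioctl_kernel_imp (a synchronous blit on the global session).
 */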
long rga_ioctl_kernel(struct rga_req *req)
{
	int ret = 0;

	if (!rga_ioctl_kernel_p) {
		printk("rga_ioctl_kernel_p is NULL\n");
		return -1;
	}

	ret = (*rga_ioctl_kernel_p)(req);
	return ret;
}

long rga_ioctl_kernel_imp(struct rga_req *req)
{
	int ret = 0;
	rga_session *session;

	mutex_lock(&rga_service.mutex);

	session = &rga_session_global;

	ret = rga_blit_sync(session, req);

	mutex_unlock(&rga_service.mutex);

	return ret;
}

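/*
 * Per-fd session setup: allocate a session, add it to the service's
 * session list, and stash it in file->private_data.
 */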
static int rga_open(struct inode *inode, struct file *file)
{
	rga_session *session = kzalloc(sizeof(rga_session), GFP_KERNEL);

	if (NULL == session) {
		pr_err("unable to allocate memory for rga_session.");
		return -ENOMEM;
	}

	session->pid = current->pid;

	INIT_LIST_HEAD(&session->waiting);
	INIT_LIST_HEAD(&session->running);
	INIT_LIST_HEAD(&session->list_session);
	init_waitqueue_head(&session->wait);
	mutex_lock(&rga_service.lock);
	list_add_tail(&session->list_session, &rga_service.session);
	mutex_unlock(&rga_service.lock);
	atomic_set(&session->task_running, 0);
	atomic_set(&session->num_done, 0);

	file->private_data = (void *)session;

	return nonseekable_open(inode, file);
}

static int rga_release(struct inode *inode, struct file *file)
{
	int task_running;
	rga_session *session = (rga_session *)file->private_data;

	if (NULL == session)
		return -EINVAL;

	task_running = atomic_read(&session->task_running);

	if (task_running) {
		pr_err("rga_service session %d still has %d task(s) running when closing\n", session->pid, task_running);
		msleep(100);
	}

	wake_up(&session->wait);
	mutex_lock(&rga_service.lock);
	list_del(&session->list_session);
	rga_service_session_clear(session);
	kfree(session);
	mutex_unlock(&rga_service.lock);

	return 0;
}

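/*
 * Interrupt handling is split in two: rga_irq runs in hard-IRQ context to
 * ack status (and soft-reset on error) and wakes rga_irq_thread, which
 * retires the finished register set and launches the next one.
 */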
static irqreturn_t rga_irq_thread(int irq, void *dev_id)
{
#if RGA_DEBUGFS
	if (RGA_INT_FLAG)
		DBG("irqthread INT[%x], STATS[%x]\n", rga_read(RGA_INT), rga_read(RGA_STATUS));
#endif
	mutex_lock(&rga_service.lock);
	if (rga_service.enable) {
		rga_del_running_list();
		rga_try_set_reg();
	}
	mutex_unlock(&rga_service.lock);

	return IRQ_HANDLED;
}

static irqreturn_t rga_irq(int irq, void *dev_id)
{
#if RGA_DEBUGFS
	if (RGA_INT_FLAG)
		DBG("irq INT[%x], STATS[%x]\n", rga_read(RGA_INT), rga_read(RGA_STATUS));
#endif
	/* if error interrupt, then soft reset hardware */
	if (rga_read(RGA_INT) & 0x03) {
		pr_err("Err irq INT[%x], STATS[%x]\n", rga_read(RGA_INT), rga_read(RGA_STATUS));
		rga_soft_reset();
	}
	/* clear INT */
	rga_write(rga_read(RGA_INT) | (0x1 << 6) | (0x1 << 7) | (0x1 << 5) | (0x1 << 4), RGA_INT);

	return IRQ_WAKE_THREAD;
}

struct file_operations rga_fops = {
	.owner		= THIS_MODULE,
	.open		= rga_open,
	.release	= rga_release,
	.unlocked_ioctl	= rga_ioctl,
};

static struct miscdevice rga_dev = {
	.minor = RGA_MAJOR,
	.name  = "rga",
	.fops  = &rga_fops,
};

#if defined(CONFIG_OF)
static const struct of_device_id rockchip_rga_dt_ids[] = {
	{ .compatible = "rockchip,rk312x-rga", },
	{},
};
#endif

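/*
 * Probe: set up service state, grab clocks (plus the pre-4.4 power-domain
 * clock), map the registers, request the threaded IRQ, create the ION
 * client when configured, and register the "rga" misc device.
 */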
rga_drv_probe(struct platform_device * pdev)1809 static int rga_drv_probe(struct platform_device *pdev)
1810 {
1811 	struct rga_drvdata *data;
1812     struct resource *res;
1813     //struct device_node *np = pdev->dev.of_node;
1814 	int ret = 0;
1815 
1816 	mutex_init(&rga_service.lock);
1817 	mutex_init(&rga_service.mutex);
1818 	atomic_set(&rga_service.total_running, 0);
1819 	rga_service.enable = false;
1820 
1821     rga_ioctl_kernel_p = rga_ioctl_kernel_imp;
1822 
1823 	data = devm_kzalloc(&pdev->dev, sizeof(struct rga_drvdata), GFP_KERNEL);
1824 	if(! data) {
1825 		ERR("failed to allocate driver data.\n");
1826 		return -ENOMEM;
1827 	}
1828 
1829 	INIT_DELAYED_WORK(&data->power_off_work, rga_power_off_work);
1830 	wake_lock_init(&data->wake_lock, WAKE_LOCK_SUSPEND, "rga");
1831 
1832 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
1833 	data->pd_rga = devm_clk_get(&pdev->dev, "pd_rga");
1834 	if (IS_ERR(data->pd_rga)) {
1835 		dev_err(&pdev->dev, "Failed to get rga power domain");
1836 		data->pd_rga = NULL;
1837 	}
1838 #endif
1839     data->aclk_rga = devm_clk_get(&pdev->dev, "aclk_rga");
1840     data->hclk_rga = devm_clk_get(&pdev->dev, "hclk_rga");
1841 
1842     /* map the registers */
1843 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1844 	data->rga_base = devm_ioremap_resource(&pdev->dev, res);
1845 	if (IS_ERR(data->rga_base)) {	/* devm_ioremap_resource returns ERR_PTR, never NULL */
1846 		ERR("rga ioremap failed\n");
1847 		ret = PTR_ERR(data->rga_base);
1848 		goto err_ioremap;
1849 	}
1850 
1851 	/* get the IRQ */
1852 	data->irq = ret = platform_get_irq(pdev, 0);
1853 	if (ret <= 0) {
1854 		ERR("failed to get rga irq resource (%d).\n", ret);
1855 		ret = ret ? ret : -ENXIO;	/* never return 0 (success) for a missing irq */
1856 		goto err_irq;
1857 	}
1858 
1859 	/* request the IRQ */
1860 	//ret = request_threaded_irq(data->irq, rga_irq, rga_irq_thread, 0, "rga", pdev);
1861 	ret = devm_request_threaded_irq(&pdev->dev, data->irq, rga_irq,
1862 					rga_irq_thread, 0, "rga", data);
1863 	if (ret) {
1864 		ERR("rga request_irq failed (%d).\n", ret);
1865 		goto err_irq;
1866 	}
1867 
1868 	platform_set_drvdata(pdev, data);
1869 	data->dev = &pdev->dev;
1870 	rga_drvdata = data;
1871 
1872 #if defined(CONFIG_ION_ROCKCHIP)
1873 	data->ion_client = rockchip_ion_client_create("rga");
1874 	if (IS_ERR(data->ion_client)) {
1875 		dev_err(&pdev->dev, "failed to create ion client for rga\n");
1876 		ret = PTR_ERR(data->ion_client);
1877 		goto err_irq;
1878 	}
1879 	dev_info(&pdev->dev, "rga ion client created successfully\n");
1880 #endif
1881 
1882 	ret = misc_register(&rga_dev);
1883 	if (ret) {
1885 		ERR("cannot register miscdev (%d)\n", ret);
1886 		goto err_misc_register;
1887 	}
1888 
1889 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
1890 	pm_runtime_enable(&pdev->dev);
1891 #endif
1892 
1893 	pr_info("Driver loaded successfully\n");
1894 
1895 	return 0;
1896 
1897 err_misc_register:
1899 err_irq:
1901 err_ioremap:
1902 	/* the irq, the mapping and data are devm-managed; only the wake lock needs teardown */
1903 	wake_lock_destroy(&data->wake_lock);
1904 
1905 	return ret;
1906 }
1907 
1908 static int rga_drv_remove(struct platform_device *pdev)
1909 {
1910 	struct rga_drvdata *data = platform_get_drvdata(pdev);
1911 	DBG("%s [%d]\n", __func__, __LINE__);
1912 
1913 	wake_lock_destroy(&data->wake_lock);
1914 	misc_deregister(&rga_dev);	/* probe registered the global rga_dev */
1915 	/* the irq and the register mapping are devm-managed; no manual release needed */
1917 	kfree(data->version);
1918 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
1919 	devm_clk_put(&pdev->dev, data->aclk_rga);
1920 	devm_clk_put(&pdev->dev, data->hclk_rga);
1921 	pm_runtime_disable(&pdev->dev);
1922 #else
1923 	if (data->pd_rga)
1924 		devm_clk_put(&pdev->dev, data->pd_rga);
1925 	devm_clk_put(&pdev->dev, data->aclk_rga);
1926 	devm_clk_put(&pdev->dev, data->hclk_rga);
1927 #endif
1928 	//clk_put(data->pd_rga);
1929 
1930 	//kfree(data);
1931 	return 0;
1932 }
1933 
1934 static struct platform_driver rga_driver = {
1935 	.probe		= rga_drv_probe,
1936 	.remove		= rga_drv_remove,
1937 	.driver		= {
1938 		.owner  = THIS_MODULE,
1939 		.name	= "rga",
1940 		.of_match_table = of_match_ptr(rockchip_rga_dt_ids),
1941 	},
1942 };
1943 
1944 #if RGA_DEBUGFS
1945 void rga_slt(void);
1946 
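/*
 * debugfs control file. With debugfs mounted in the usual place the node
 * appears as /sys/kernel/debug/rga_debug/rga; reading it prints the help
 * text below, and writing one of the listed keywords toggles the
 * corresponding flag, e.g. (from a shell):
 *
 *	echo reg > /sys/kernel/debug/rga_debug/rga
 */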
1947 static int rga_debug_show(struct seq_file *m, void *data)
1948 {
1949 	seq_puts(m, "echo reg > rga to toggle rga reg dumping\n");
1950 	seq_puts(m, "echo msg > rga to toggle rga msg dumping\n");
1951 	seq_puts(m, "echo time > rga to toggle rga time logging\n");
1952 	seq_puts(m, "echo check > rga to toggle rga check mode\n");
1953 	seq_puts(m, "echo int > rga to toggle rga interrupt logging\n");
1954 	seq_puts(m, "echo stop > rga to toggle use of the hardware\n");
1955 	return 0;
1956 }
1957 
1958 static ssize_t rga_debug_write(struct file *file, const char __user *ubuf,
1959 			       size_t len, loff_t *offp)
1960 {
1961 	char buf[14];
1962 
1963 	if (len == 0 || len > sizeof(buf) - 1)
1964 		return -EINVAL;
1965 	if (copy_from_user(buf, ubuf, len))
1966 		return -EFAULT;
1967 	buf[len - 1] = '\0';	/* drop the trailing newline from echo */
1968 	if (strncmp(buf, "reg", 4) == 0) {
1969 		if (RGA_TEST_REG) {
1970 			RGA_TEST_REG = 0;
1971 			DBG("close rga reg!\n");
1972 		} else {
1973 			RGA_TEST_REG = 1;
1974 			DBG("open rga reg!\n");
1975 		}
1976 	} else if (strncmp(buf, "msg", 3) == 0) {
1977 		if (RGA_TEST_MSG) {
1978 			RGA_TEST_MSG = 0;
1979 			DBG("close rga test MSG!\n");
1980 		} else {
1981 			RGA_TEST_MSG = 1;
1982 			DBG("open rga test MSG!\n");
1983 		}
1984 	} else if (strncmp(buf, "time", 4) == 0) {
1985 		if (RGA_TEST_TIME) {
1986 			RGA_TEST_TIME = 0;
1987 			DBG("close rga test time!\n");
1988 		} else {
1989 			RGA_TEST_TIME = 1;
1990 			DBG("open rga test time!\n");
1991 		}
1992 	} else if (strncmp(buf, "check", 5) == 0) {
1993 		if (RGA_CHECK_MODE) {
1994 			RGA_CHECK_MODE = 0;
1995 			DBG("close rga check mode!\n");
1996 		} else {
1997 			RGA_CHECK_MODE = 1;
1998 			DBG("open rga check mode!\n");
1999 		}
2000 	} else if (strncmp(buf, "stop", 4) == 0) {
2001 		if (RGA_NONUSE) {
2002 			RGA_NONUSE = 0;
2003 			DBG("resume using rga hardware!\n");
2004 		} else {
2005 			RGA_NONUSE = 1;
2006 			DBG("stop using rga hardware!\n");
2007 		}
2008 	} else if (strncmp(buf, "int", 3) == 0) {
2009 		if (RGA_INT_FLAG) {
2010 			RGA_INT_FLAG = 0;
2011 			DBG("close rga interrupt msg!\n");
2012 		} else {
2013 			RGA_INT_FLAG = 1;
2014 			DBG("open rga interrupt msg!\n");
2015 		}
2016 	} else if (strncmp(buf, "slt", 3) == 0) {
2017 		rga_slt();
2018 	}
2019 	return len;
2020 }
2021 
2022 static int rga_debug_open(struct inode *inode, struct file *file)
2024 {
2025 	return single_open(file, rga_debug_show, NULL);
2026 }
2027 
2028 static const struct file_operations rga_debug_fops = {
2029 	.owner = THIS_MODULE,
2030 	.open = rga_debug_open,
2031 	.read = seq_read,
2032 	.llseek = seq_lseek,
2033 	.release = single_release,
2034 	.write = rga_debug_write,
2035 };
2036 
2037 static void rga_debugfs_add(void)
2038 {
2039 	struct dentry *rga_debug_root;
2040 	struct dentry *ent;
2041 
2042 	rga_debug_root = debugfs_create_dir("rga_debug", NULL);
2043 
2044 	ent = debugfs_create_file("rga", 0644, rga_debug_root,
2045 				  NULL, &rga_debug_fops);
2046 	if (!ent) {
2047 		pr_err("create rga_debugfs err\n");
2048 		debugfs_remove_recursive(rga_debug_root);
2049 	}
2050 }
2051 
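/*
 * Simple self-test (triggered by "echo slt > .../rga"): fill a solid
 * RGBA8888 source, copy it through the engine with rga_blit_sync() and
 * compare source and destination word by word. The pre-4.4 variant uses
 * physically contiguous buffers from the ION CMA heap; the variant in
 * the #else branch below uses static kernel buffers instead.
 */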
2052 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
2053 void rga_slt(void)
2054 {
2055 	struct rga_req req;
2056 	rga_session session;
2057 	void *src_vir, *dst_vir;
2058 	unsigned int *src, *dst;
2059 	ion_phys_addr_t src_phy, dst_phy;
2060 	int i;
2061 	unsigned int srcW, srcH, dstW, dstH;
2062 	struct ion_handle *src_handle;
2063 	struct ion_handle *dst_handle;
2064 	struct rga_drvdata *data;
2065 	size_t srclen, dstlen;	/* ion_phys() expects size_t lengths */
2066 	int err_count = 0;
2067 	int right_count = 0;
2068 	int size;
2069 	unsigned int *pstd;
2070 	unsigned int *pnow;
2071 
2072 	data = rga_drvdata;
2073 	srcW = 1280;
2074 	srcH = 720;
2075 	dstW = 1280;
2076 	dstH = 720;
2077 	src_handle = ion_alloc(data->ion_client, (size_t)srcW * srcH * 4, 0,
2078 		   ION_HEAP(ION_CMA_HEAP_ID), 0);
2079 
2080 	dst_handle = ion_alloc(data->ion_client, (size_t)dstW * dstH * 4, 0,
2081 		   ION_HEAP(ION_CMA_HEAP_ID), 0);
2082 
2083 	session.pid	= current->pid;
2084 	INIT_LIST_HEAD(&session.waiting);
2085 	INIT_LIST_HEAD(&session.running);
2086 	INIT_LIST_HEAD(&session.list_session);
2087 	init_waitqueue_head(&session.wait);
2088 	/* no need to protect */
2089 	list_add_tail(&session.list_session, &rga_service.session);
2090 	atomic_set(&session.task_running, 0);
2091 	atomic_set(&session.num_done, 0);
2092 
2093 	src_vir = ion_map_kernel(data->ion_client, src_handle);
2094 	dst_vir = ion_map_kernel(data->ion_client, dst_handle);
2095 
2096 	ion_phys(data->ion_client, src_handle, &src_phy, &srclen);
2097 	ion_phys(data->ion_client, dst_handle, &dst_phy, &dstlen);
2098 
2099 	memset(&req, 0, sizeof(struct rga_req));
2100 	src = (unsigned int *)src_vir;
2101 	dst = (unsigned int *)dst_vir;
2102 
2103 	memset(src_vir, 0x80, srcW * srcH * 4);
2104 
2105 	DBG("\n********************************\n");
2106 	DBG("************ RGA_TEST ************\n");
2107 	DBG("********************************\n\n");
2108 
2109 	req.src.act_w = srcW;
2110 	req.src.act_h = srcH;
2111 
2112 	req.src.vir_w = srcW;
2113 	req.src.vir_h = srcH;
2114 	req.src.yrgb_addr = 0;
2115 	req.src.uv_addr = src_phy;
2116 	req.src.v_addr = src_phy + srcH * srcW;
2117 	req.src.format = RK_FORMAT_RGBA_8888;
2118 
2119 	req.dst.act_w = dstW;
2120 	req.dst.act_h = dstH;
2121 
2122 	req.dst.vir_w = dstW;
2123 	req.dst.vir_h = dstH;
2124 	req.dst.x_offset = 0;
2125 	req.dst.y_offset = 0;
2126 
2127 	req.dst.yrgb_addr = 0;
2128 	req.dst.uv_addr = dst_phy;
2129 	req.dst.v_addr = dst_phy + dstH * dstW;
2130 
2131 	req.dst.format = RK_FORMAT_RGBA_8888;
2132 
2133 	req.clip.xmin = 0;
2134 	req.clip.xmax = dstW - 1;
2135 	req.clip.ymin = 0;
2136 	req.clip.ymax = dstH - 1;
2137 
2138 	rga_blit_sync(&session, &req);
2139 
2140 	size = dstW * dstH * 4;
2141 	pstd = (unsigned int *)src_vir;
2142 	pnow = (unsigned int *)dst_vir;
2143 
2144 	DBG("[  num   : srcInfo    dstInfo ]\n");
2145 	for (i = 0; i < size / 4; i++) {
2146 		if (*pstd != *pnow) {
2147 			DBG("[X%.8d:0x%x 0x%x]", i, *pstd, *pnow);
2148 			if (i % 4 == 0)
2149 				DBG("\n");
2150 			err_count++;
2151 		} else {
2152 			if (i % (640 * 1024) == 0)
2153 				DBG("[Y%.8d:0x%.8x 0x%.8x]\n", i,
2154 				    *pstd, *pnow);
2155 			right_count++;
2156 		}
2157 		pstd++;
2158 		pnow++;
2159 		if (err_count > 64)
2160 			break;
2161 	}
2162 
2163 	DBG("err_count=%d,right_count=%d\n", err_count, right_count);
2164 	if (err_count != 0)
2165 		DBG("rga slt err !!\n");
2166 	else
2167 		DBG("rga slt success !!\n");
2168 
2169 	ion_unmap_kernel(data->ion_client, src_handle);
2170 	ion_unmap_kernel(data->ion_client, dst_handle);
2171 
2172 	ion_free(data->ion_client, src_handle);
2173 	ion_free(data->ion_client, dst_handle);
2174 }
2175 #else
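/*
 * Variant for kernels >= 4.4, where the ION path above is not built:
 * static kernel buffers are physically contiguous in the linear map, so
 * their virt_to_phys() addresses can be handed to the engine directly
 * after flushing the CPU cache with rga_dma_flush_range().
 */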
2176 unsigned long src1_buf[400 * 200];
2177 unsigned long dst1_buf[400 * 200];
2178 void rga_slt(void)
2179 {
2180 	struct rga_req req;
2181 	rga_session session;
2182 	unsigned long *src_vir, *dst_vir;
2183 	int i;
2184 	unsigned int srcW, srcH, dstW, dstH;
2185 	int err_count = 0;
2186 	int right_count = 0;
2187 	int size;
2188 	unsigned int *pstd;
2189 	unsigned int *pnow;
2190 
2191 	srcW = 400;
2192 	srcH = 200;
2193 	dstW = 400;
2194 	dstH = 200;
2195 
2196 	session.pid	= current->pid;
2197 	INIT_LIST_HEAD(&session.waiting);
2198 	INIT_LIST_HEAD(&session.running);
2199 	INIT_LIST_HEAD(&session.list_session);
2200 	init_waitqueue_head(&session.wait);
2201 	/* no need to protect */
2202 	list_add_tail(&session.list_session, &rga_service.session);
2203 	atomic_set(&session.task_running, 0);
2204 	atomic_set(&session.num_done, 0);
2205 
2206 	memset(&req, 0, sizeof(struct rga_req));
2207 	src_vir = src1_buf;
2208 	dst_vir = dst1_buf;
2209 
2210 	memset(src1_buf, 0x50, 400 * 200 * 4);
2211 	memset(dst1_buf, 0x00, 400 * 200 * 4);
2212 
2213 	rga_dma_flush_range(&src1_buf[0], &src1_buf[400 * 200]);
2214 
2215 	DBG("\n********************************\n");
2216 	DBG("************ RGA_TEST ************\n");
2217 	DBG("********************************\n\n");
2218 
2219 	req.src.act_w = srcW;
2220 	req.src.act_h = srcH;
2221 
2222 	req.src.vir_w = srcW;
2223 	req.src.vir_h = srcH;
2224 	req.src.yrgb_addr = 0;
2225 	req.src.uv_addr = (unsigned long)virt_to_phys(src_vir);
2226 	req.src.v_addr = req.src.uv_addr + srcH * srcW;
2227 	req.src.format = RK_FORMAT_RGBA_8888;
2228 
2229 	req.dst.act_w = dstW;
2230 	req.dst.act_h = dstH;
2231 
2232 	req.dst.vir_w = dstW;
2233 	req.dst.vir_h = dstH;
2234 	req.dst.x_offset = 0;
2235 	req.dst.y_offset = 0;
2236 
2237 	req.dst.yrgb_addr = 0;
2238 	req.dst.uv_addr = (unsigned long)virt_to_phys(dst_vir);
2239 	req.dst.v_addr = req.dst.uv_addr + dstH * dstW;
2240 
2241 	req.dst.format = RK_FORMAT_RGBA_8888;
2242 	rga_blit_sync(&session, &req);
2243 	size = dstW * dstH * 4;
2244 	pstd = (unsigned int *)src_vir;
2245 	pnow = (unsigned int *)dst_vir;
2246 
2247 	DBG("[  num   : srcInfo    dstInfo ]\n");
2248 	for (i = 0; i < size / 4; i++) {
2249 		if (*pstd != *pnow) {
2250 			DBG("[X%.8d:0x%x 0x%x]", i, *pstd, *pnow);
2251 			if (i % 4 == 0)
2252 				DBG("\n");
2253 			err_count++;
2254 		} else {
2255 			if (i % (640 * 1024) == 0)
2256 				DBG("[Y%.8d:0x%.8x 0x%.8x]\n", i,
2257 				    *pstd, *pnow);
2258 			right_count++;
2259 		}
2260 		pstd++;
2261 		pnow++;
2262 		if (err_count > 64)
2263 			break;
2264 	}
2265 
2266 	DBG("err_count=%d, right_count=%d\n", err_count, right_count);
2267 	if (err_count != 0)
2268 		DBG("rga slt err !!\n");
2269 	else
2270 		DBG("rga slt success !!\n");
2271 }
2272 #endif
2273 #endif
2274 
2275 void rga_test_0(void);
2276 void rga_test_1(void);
2277 
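/*
 * Module init: pre-allocate everything the service layer needs before
 * registering the platform driver - a page table plus zeroed backing
 * pages for the pre-scale intermediate buffer, a 256 KiB buffer of MMU
 * descriptors (rga_mmu_buf, managed as a ring via front/back/size in
 * 32-bit entries), and a page-pointer array used when mapping user
 * buffers.
 */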
2278 static int __init rga_init(void)
2279 {
2280 	int i, ret;
2281 	void * pre_scale_page_buf;
2282 	uint32_t *pre_scale_page_table;
2283 	uint32_t *mmu_base;
2284 	struct page **pages;
2285 
2286 	/* malloc pre scale mid buf mmu table */
2287 	pre_scale_page_table = kzalloc(RGA_PRE_SCALE_PAGE_SIZE * sizeof(*pre_scale_page_table),
2288 				       GFP_KERNEL);
2289 	if (pre_scale_page_table == NULL) {
2290 		pr_err("RGA alloc pre-scale page table failed.\n");
2291 		return -ENOMEM;
2292 	}
2293 
2294 	/* alloc reserved pre-scale buf */
2295 	for (i = 0; i < RGA_PRE_SCALE_PAGE_SIZE; i++) {
2296 		pre_scale_page_buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
2297 		if (pre_scale_page_buf == NULL) {
2298 			pr_err("RGA init pre scale page_table[%d] failed\n", i);
2299 			ret = -ENOMEM;
2300 			goto free_pre_scale_page_table;
2301 		}
2302 		pre_scale_page_table[i] = (uint32_t)virt_to_phys(pre_scale_page_buf);
2303 	}
2304 
2305 	mmu_base = kmalloc(1024 * 256, GFP_KERNEL);
2306 	if (mmu_base == NULL) {
2307 		pr_err("RGA alloc mmu buffer failed.\n");
2308 		ret = -ENOMEM;
2309 		goto free_pre_scale_page_table;
2310 	}
2311 
2312 	pages = kmalloc(32768 * sizeof(struct page *), GFP_KERNEL);
2313 	if (pages == NULL) {
2314 		pr_err("RGA alloc pages buffer failed.\n");
2315 		ret = -ENOMEM;
2316 		goto free_mmu_base;
2317 	}
2318 
2319 	ret = platform_driver_register(&rga_driver);
2320 	if (ret != 0) {
2321 		pr_err("Platform driver register failed (%d).\n", ret);
2322 		goto free_pages_buf;
2323 	}
2324 
2325 	rga_service.pre_scale_buf = pre_scale_page_table;
2326 
2327 	rga_mmu_buf.buf_virtual = mmu_base;
2328 #if (defined(CONFIG_ARM) && defined(CONFIG_ARM_LPAE))
2329 	rga_mmu_buf.buf = (uint32_t *)(uint32_t)virt_to_phys((void *)((unsigned long)mmu_base));
2330 #else
2331 	rga_mmu_buf.buf = (uint32_t *)virt_to_phys((void *)((unsigned long)mmu_base));
2332 #endif
2333 	rga_mmu_buf.front = 0;
2334 	rga_mmu_buf.back = 64*1024;
2335 	rga_mmu_buf.size = 64*1024;
2336 
2337 	rga_mmu_buf.pages = pages;
2338 
2339 	rga_session_global.pid = 0x0000ffff;
2340 	INIT_LIST_HEAD(&rga_session_global.waiting);
2341 	INIT_LIST_HEAD(&rga_session_global.running);
2342 	INIT_LIST_HEAD(&rga_session_global.list_session);
2343 
2344 	INIT_LIST_HEAD(&rga_service.waiting);
2345 	INIT_LIST_HEAD(&rga_service.running);
2346 	INIT_LIST_HEAD(&rga_service.done);
2347 	INIT_LIST_HEAD(&rga_service.session);
2348 
2349 	init_waitqueue_head(&rga_session_global.wait);
2350 	//mutex_lock(&rga_service.lock);
2351 	list_add_tail(&rga_session_global.list_session, &rga_service.session);
2352 	//mutex_unlock(&rga_service.lock);
2353 	atomic_set(&rga_session_global.task_running, 0);
2354 	atomic_set(&rga_session_global.num_done, 0);
2355 
2356 #if RGA_TEST_CASE
2357 	rga_test_0();
2358 #endif
2359 #if RGA_DEBUGFS
2360 	rga_debugfs_add();
2361 #endif
2362 
2363 	INFO("RGA Module initialized.\n");
2364 
2365 	return 0;
2366 
2367 free_pages_buf:
2368 	kfree(pages);
2369 
2370 free_mmu_base:
2371 	kfree(mmu_base);
2372 
2373 free_pre_scale_page_table:
2374 	for (i = 0; i < RGA_PRE_SCALE_PAGE_SIZE; i++)	/* pages came from __get_free_page() */
2375 		if (pre_scale_page_table[i] != 0)
2376 			free_page((unsigned long)phys_to_virt((phys_addr_t)pre_scale_page_table[i]));
2377 
2378 	kfree(pre_scale_page_table);
2379 
2380 	return ret;
2381 }
2382 
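/*
 * Module exit: power the block down, release everything rga_init()
 * allocated (pre-scale pages, MMU buffer, page array), then unregister
 * the platform driver.
 */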
2383 static void __exit rga_exit(void)
2384 {
2385 	phys_addr_t pre_scale_buf;
2386 	int i;
2387 
2388 	rga_power_off();
2389 	if (rga_service.pre_scale_buf != NULL) {
2390 		for (i = 0; i < RGA_PRE_SCALE_PAGE_SIZE; i++)	/* free every page, not just [0] */
2391 			if ((pre_scale_buf = (phys_addr_t)rga_service.pre_scale_buf[i]) != 0)
2392 				free_page((unsigned long)phys_to_virt(pre_scale_buf));
2393 		kfree(rga_service.pre_scale_buf);
2394 	}
2395 	kfree(rga_mmu_buf.buf_virtual);
2396 	kfree(rga_mmu_buf.pages);
2397 
2398 	platform_driver_unregister(&rga_driver);
2399 }
2400 
2401 #if RGA_TEST_CASE
2402 
2403 extern struct fb_info * rk_get_fb(int fb_id);
2404 EXPORT_SYMBOL(rk_get_fb);
2405 
2406 extern void rk_direct_fb_show(struct fb_info * fbi);
2407 EXPORT_SYMBOL(rk_direct_fb_show);
2408 
2409 unsigned int src_buf[1920*1080];
2410 unsigned int dst_buf[1920*1080];
2411 //unsigned int tmp_buf[1920*1080 * 2];
2412 
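/*
 * Legacy bring-up test (built only when RGA_TEST_CASE is set): rotates a
 * 1024x600 RGBA8888 source by 90 degrees (sina = 65536, cosa = 0 in the
 * engine's 16.16 fixed-point convention) into a 1280x800 destination and
 * pushes the result straight to fb0 via rk_direct_fb_show().
 */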
2413 void rga_test_0(void)
2414 {
2415     struct rga_req req;
2416     rga_session session;
2417     unsigned int *src, *dst;
2418     uint32_t i, j;
2419     uint8_t *p;
2420     uint8_t t;
2421     uint32_t *dst0, *dst1, *dst2;
2422 
2423     struct fb_info *fb;
2424 
2425     session.pid	= current->pid;
2426 	INIT_LIST_HEAD(&session.waiting);
2427 	INIT_LIST_HEAD(&session.running);
2428 	INIT_LIST_HEAD(&session.list_session);
2429 	init_waitqueue_head(&session.wait);
2430 	/* no need to protect */
2431 	list_add_tail(&session.list_session, &rga_service.session);
2432 	atomic_set(&session.task_running, 0);
2433     atomic_set(&session.num_done, 0);
2434 	//file->private_data = (void *)session;
2435 
2436     fb = rk_get_fb(0);
2437 
2438     memset(&req, 0, sizeof(struct rga_req));
2439     src = src_buf;
2440     dst = dst_buf;
2441 
2442     memset(src_buf, 0x80, 1024*600*4);
2443 
2444     dmac_flush_range(&src_buf[0], &src_buf[1024*600]);
2445     outer_flush_range(virt_to_phys(&src_buf[0]), virt_to_phys(&src_buf[1024*600]));
2446 
2447 
2448     #if 0
2449     memset(src_buf, 0x80, 800*480*4);
2450     memset(dst_buf, 0xcc, 800*480*4);
2451 
2452     dmac_flush_range(&dst_buf[0], &dst_buf[800*480]);
2453     outer_flush_range(virt_to_phys(&dst_buf[0]),virt_to_phys(&dst_buf[800*480]));
2454     #endif
2455 
2456     dst0 = &dst_buf[0];
2457     //dst1 = &dst_buf[1280*800*4];
2458     //dst2 = &dst_buf[1280*800*4*2];
2459 
2460     i = j = 0;
2461 
2462     printk("\n********************************\n");
2463     printk("************ RGA_TEST ************\n");
2464     printk("********************************\n\n");
2465 
2466     req.src.act_w = 1024;
2467     req.src.act_h = 600;
2468 
2469     req.src.vir_w = 1024;
2470     req.src.vir_h = 600;
2471     req.src.yrgb_addr = (uint32_t)virt_to_phys(src);
2472     req.src.uv_addr = (uint32_t)(req.src.yrgb_addr + 1080*1920);
2473     req.src.v_addr = (uint32_t)virt_to_phys(src);
2474     req.src.format = RK_FORMAT_RGBA_8888;
2475 
2476     req.dst.act_w = 600;
2477     req.dst.act_h = 352;
2478 
2479     req.dst.vir_w = 1280;
2480     req.dst.vir_h = 800;
2481     req.dst.x_offset = 600;
2482     req.dst.y_offset = 0;
2483 
2484     dst = dst0;
2485 
2486     req.dst.yrgb_addr = ((uint32_t)virt_to_phys(dst));
2487 
2488     //req.dst.format = RK_FORMAT_RGB_565;
2489 
2490     req.clip.xmin = 0;
2491     req.clip.xmax = 1279;
2492     req.clip.ymin = 0;
2493     req.clip.ymax = 799;
2494 
2495     //req.render_mode = color_fill_mode;
2496     //req.fg_color = 0x80ffffff;
2497 
2498     req.rotate_mode = 1;
2499     //req.scale_mode = 2;
2500 
2501     //req.alpha_rop_flag = 0;
2502     //req.alpha_rop_mode = 0x19;
2503     //req.PD_mode = 3;
2504 
2505     req.sina = 65536;
2506     req.cosa = 0;
2507 
2508     //req.mmu_info.mmu_flag = 0x21;
2509     //req.mmu_info.mmu_en = 1;
2510 
2511     //printk("src = %.8x\n", req.src.yrgb_addr);
2512     //printk("src = %.8x\n", req.src.uv_addr);
2513     //printk("dst = %.8x\n", req.dst.yrgb_addr);
2514 
2515 
2516     rga_blit_sync(&session, &req);
2517 
2518     #if 1
2519     fb->var.bits_per_pixel = 32;
2520 
2521     fb->var.xres = 1280;
2522     fb->var.yres = 800;
2523 
2524     fb->var.red.length = 8;
2525     fb->var.red.offset = 0;
2526     fb->var.red.msb_right = 0;
2527 
2528     fb->var.green.length = 8;
2529     fb->var.green.offset = 8;
2530     fb->var.green.msb_right = 0;
2531 
2532     fb->var.blue.length = 8;
2533 
2534     fb->var.blue.offset = 16;
2535     fb->var.blue.msb_right = 0;
2536 
2537     fb->var.transp.length = 8;
2538     fb->var.transp.offset = 24;
2539     fb->var.transp.msb_right = 0;
2540 
2541     fb->var.nonstd &= (~0xff);
2542     fb->var.nonstd |= 1;
2543 
2544     fb->fix.smem_start = virt_to_phys(dst);
2545 
2546     rk_direct_fb_show(fb);
2547     #endif
2548 
2549 }
2550 
2551 #endif
2552 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
2553 fs_initcall(rga_init);
2554 #else
2555 module_init(rga_init);
2556 #endif
2557 module_exit(rga_exit);
2558 
2559 /* Module information */
2560 MODULE_AUTHOR("zsq@rock-chips.com");
2561 MODULE_DESCRIPTION("Driver for rga device");
2562 MODULE_LICENSE("GPL");
2563