/*
 * Copyright (C) 2012 ROCKCHIP, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "rga: " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <asm/delay.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
//#include <mach/io.h>
//#include <mach/irqs.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/syscalls.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <asm/cacheflush.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/wakelock.h>
#include <linux/version.h>
#include <linux/debugfs.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
#include <linux/dma-buf.h>
#include <linux/pm_runtime.h>
#endif

#if defined(CONFIG_ION_ROCKCHIP)
#include <linux/rockchip_ion.h>
#endif

#include "rga.h"
#include "rga_reg_info.h"
#include "rga_mmu_info.h"
#include "RGA_API.h"

#define RGA_TEST_CASE 0

#define RGA_TEST_FLUSH_TIME 0
#define RGA_INFO_BUS_ERROR 1

#define RGA_PRE_SCALE_BUF_SIZE (2048 * 2048 * 4)
#define RGA_PRE_SCALE_PAGE_SIZE (RGA_PRE_SCALE_BUF_SIZE >> PAGE_SHIFT)

#define RGA_POWER_OFF_DELAY (4 * HZ) /* 4s */
#define RGA_TIMEOUT_DELAY (2 * HZ) /* 2s */

#define RGA_MAJOR 255

#if defined(CONFIG_ARCH_RK2928) || defined(CONFIG_ARCH_RK3026)
#define RK30_RGA_PHYS RK2928_RGA_PHYS
#define RK30_RGA_SIZE RK2928_RGA_SIZE
#endif
#define RGA_RESET_TIMEOUT 1000

/* Driver information */
#define DRIVER_DESC "RGA Device Driver"
#define DRIVER_NAME "rga"


ktime_t rga_start;
ktime_t rga_end;

static rga_session rga_session_global;

long (*rga_ioctl_kernel_p)(struct rga_req *);

#if RGA_DEBUGFS
unsigned char RGA_TEST_REG;
unsigned char RGA_TEST_MSG;
unsigned char RGA_TEST_TIME;
unsigned char RGA_CHECK_MODE;
unsigned char RGA_NONUSE;
unsigned char RGA_INT_FLAG;
#endif

struct rga_drvdata *rga_drvdata;
rga_service_info rga_service;
struct rga_mmu_buf_t rga_mmu_buf;


#if defined(CONFIG_ION_ROCKCHIP)
extern struct ion_client *rockchip_ion_client_create(const char *name);
#endif

static int rga_blit_async(rga_session *session, struct rga_req *req);
static void rga_del_running_list(void);
static void rga_del_running_list_timeout(void);
static void rga_try_set_reg(void);


/* Logging */
#define RGA_DEBUG 1
#if RGA_DEBUG
#define DBG(format, args...) printk(KERN_DEBUG "%s: " format, DRIVER_NAME, ## args)
#define ERR(format, args...) printk(KERN_ERR "%s: " format, DRIVER_NAME, ## args)
#define WARNING(format, args...) printk(KERN_WARNING "%s: " format, DRIVER_NAME, ## args)
#define INFO(format, args...) printk(KERN_INFO "%s: " format, DRIVER_NAME, ## args)
#else
#define DBG(format, args...)
#define ERR(format, args...)
#define WARNING(format, args...)
#define INFO(format, args...)
#endif

#if RGA_DEBUGFS
static const char *rga_get_cmd_mode_str(u32 cmd)
{
	switch (cmd) {
	case RGA_BLIT_SYNC:
		return "RGA_BLIT_SYNC";
	case RGA_BLIT_ASYNC:
		return "RGA_BLIT_ASYNC";
	case RGA_FLUSH:
		return "RGA_FLUSH";
	case RGA_GET_RESULT:
		return "RGA_GET_RESULT";
	case RGA_GET_VERSION:
		return "RGA_GET_VERSION";
	default:
		return "UNF";
	}
}

static const char *rga_get_blend_mode_str(u16 alpha_rop_flag)
{
	if (alpha_rop_flag == 0)
		return "no blend";
	else if (alpha_rop_flag == 0x19)
		return "blend mode 105 src + (1 - src.a) * dst";
	else if (alpha_rop_flag == 0x11)
		return "blend mode 405 src.a * src + (1 - src.a) * dst";
	else
		return "check reg for more information";
}

static const char *rga_get_render_mode_str(u8 mode)
{
	switch (mode & 0x0F) {
	case 0x0:
		return "bitblt";
	case 0x1:
		return "color_palette";
	case 0x2:
		return "color_fill";
	case 0x3:
		return "line_point_drawing";
	case 0x4:
		return "blur_sharp_filter";
	case 0x5:
		return "pre_scaling";
	case 0x6:
		return "update_palette_table";
	case 0x7:
		return "update_patten_buff";
	default:
		return "UNF";
	}
}

static const char *rga_get_rotate_mode_str(struct rga_req *req_rga)
{
	switch (req_rga->rotate_mode) {
	case 0x0:
		return "no rotate";
	case 0x1:
		if (req_rga->sina == 0 && req_rga->cosa == 65536)
			/* rotate 0 */
			return "rotate 0";
		else if (req_rga->sina == 65536 && req_rga->cosa == 0)
			/* rotate 90 */
			return "rotate 90";
		else if (req_rga->sina == 0 && req_rga->cosa == -65536)
			/* rotate 180 */
			return "rotate 180";
		else if (req_rga->sina == -65536 && req_rga->cosa == 0)
			/* rotate 270 */
			return "rotate 270";
		return "UNF";
	case 0x2:
		return "xmirror";
	case 0x3:
		return "ymirror";
	default:
		return "UNF";
	}
}

static bool rga_is_yuv10bit_format(uint32_t format)
{
	bool ret = false;

	switch (format) {
	case RK_FORMAT_YCbCr_420_SP_10B:
	case RK_FORMAT_YCrCb_420_SP_10B:
		ret = true;
		break;
	}
	return ret;
}

static bool rga_is_yuv8bit_format(uint32_t format)
{
	bool ret = false;

	switch (format) {
	case RK_FORMAT_YCbCr_422_SP:
	case RK_FORMAT_YCbCr_422_P:
	case RK_FORMAT_YCbCr_420_SP:
	case RK_FORMAT_YCbCr_420_P:
	case RK_FORMAT_YCrCb_422_SP:
	case RK_FORMAT_YCrCb_422_P:
	case RK_FORMAT_YCrCb_420_SP:
	case RK_FORMAT_YCrCb_420_P:
		ret = true;
		break;
	}
	return ret;
}

static const char *rga_get_format_name(uint32_t format)
{
	switch (format) {
	case RK_FORMAT_RGBA_8888:
		return "RGBA8888";
	case RK_FORMAT_RGBX_8888:
		return "RGBX8888";
	case RK_FORMAT_RGB_888:
		return "RGB888";
	case RK_FORMAT_BGRA_8888:
		return "BGRA8888";
	case RK_FORMAT_RGB_565:
		return "RGB565";
	case RK_FORMAT_RGBA_5551:
		return "RGBA5551";
	case RK_FORMAT_RGBA_4444:
		return "RGBA4444";
	case RK_FORMAT_BGR_888:
		return "BGR888";

	case RK_FORMAT_YCbCr_422_SP:
		return "YCbCr422SP";
	case RK_FORMAT_YCbCr_422_P:
		return "YCbCr422P";
	case RK_FORMAT_YCbCr_420_SP:
		return "YCbCr420SP";
	case RK_FORMAT_YCbCr_420_P:
		return "YCbCr420P";
	case RK_FORMAT_YCrCb_422_SP:
		return "YCrCb422SP";
	case RK_FORMAT_YCrCb_422_P:
		return "YCrCb422P";
	case RK_FORMAT_YCrCb_420_SP:
		return "YCrCb420SP";
	case RK_FORMAT_YCrCb_420_P:
		return "YCrCb420P";

	case RK_FORMAT_BPP1:
		return "BPP1";
	case RK_FORMAT_BPP2:
		return "BPP2";
	case RK_FORMAT_BPP4:
		return "BPP4";
	case RK_FORMAT_BPP8:
		return "BPP8";
	case RK_FORMAT_YCbCr_420_SP_10B:
		return "YCbCr420SP10B";
	case RK_FORMAT_YCrCb_420_SP_10B:
		return "YCrCb420SP10B";
	default:
		return "UNF";
	}
}

static void print_debug_info(struct rga_req *req)
{
	DBG("render_mode %s, rotate_mode %s, blit mode %d\n",
	    rga_get_render_mode_str(req->render_mode),
	    rga_get_rotate_mode_str(req), req->bsfilter_flag);
	DBG("src : y=%lx uv=%lx v=%lx format=%s aw=%d ah=%d vw=%d vh=%d xoff=%d yoff=%d\n",
	    req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
	    rga_get_format_name(req->src.format),
	    req->src.act_w, req->src.act_h, req->src.vir_w, req->src.vir_h,
	    req->src.x_offset, req->src.y_offset);
	DBG("dst : y=%lx uv=%lx v=%lx format=%s aw=%d ah=%d vw=%d vh=%d xoff=%d yoff=%d\n",
	    req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
	    rga_get_format_name(req->dst.format),
	    req->dst.act_w, req->dst.act_h, req->dst.vir_w, req->dst.vir_h,
	    req->dst.x_offset, req->dst.y_offset);
	DBG("mmuflg = %.8x, mmuen is %d\n", req->mmu_info.mmu_flag, req->mmu_info.mmu_en);
	DBG("clip.xmin = %d, clip.xmax = %d, clip.ymin = %d, clip.ymax = %d\n",
	    req->clip.xmin, req->clip.xmax, req->clip.ymin, req->clip.ymax);
	DBG("alpha: flag %.8x mode=%.8x\n", req->alpha_rop_flag, req->alpha_rop_mode);
	DBG("blend mode:%s\n", rga_get_blend_mode_str(req->alpha_rop_flag));
	DBG("yuv2rgb mode:%x\n", req->yuv2rgb_mode);
}

static int rga_align_check(struct rga_req *req)
{
	if (rga_is_yuv10bit_format(req->src.format)) {
		if ((req->src.vir_w % 16) || (req->src.x_offset % 2) ||
		    (req->src.act_w % 2) || (req->src.y_offset % 2) ||
		    (req->src.act_h % 2) || (req->src.vir_h % 2))
			DBG("err src wstride is not aligned to 16 or yuv not aligned to 2\n");
	}
	if (rga_is_yuv10bit_format(req->dst.format)) {
		if ((req->dst.vir_w % 16) || (req->dst.x_offset % 2) ||
		    (req->dst.act_w % 2) || (req->dst.y_offset % 2) ||
		    (req->dst.act_h % 2) || (req->dst.vir_h % 2))
			DBG("err dst wstride is not aligned to 16 or yuv not aligned to 2\n");
	}
	if (rga_is_yuv8bit_format(req->src.format)) {
		if ((req->src.vir_w % 8) || (req->src.x_offset % 2) ||
		    (req->src.act_w % 2) || (req->src.y_offset % 2) ||
		    (req->src.act_h % 2) || (req->src.vir_h % 2))
			DBG("err src wstride is not aligned to 8 or yuv not aligned to 2\n");
	}
	if (rga_is_yuv8bit_format(req->dst.format)) {
		if ((req->dst.vir_w % 8) || (req->dst.x_offset % 2) ||
		    (req->dst.act_w % 2) || (req->dst.y_offset % 2) ||
		    (req->dst.act_h % 2) || (req->dst.vir_h % 2))
			DBG("err dst wstride is not aligned to 8 or yuv not aligned to 2\n");
	}
	DBG("rga align check over!\n");
	return 0;
}

static int rga_memory_check(void *vaddr, u32 w, u32 h, u32 format, int fd)
{
	int bits = 32;
	int temp_data = 0;
	void *one_line = kzalloc(w * 4, GFP_KERNEL);

	if (!one_line) {
		pr_err("kzalloc fail %s[%d]\n", __func__, __LINE__);
		return 0;
	}

	switch (format) {
	case RK_FORMAT_RGBA_8888:
	case RK_FORMAT_RGBX_8888:
	case RK_FORMAT_BGRA_8888:
		bits = 32;
		break;
	case RK_FORMAT_RGB_888:
	case RK_FORMAT_BGR_888:
		bits = 24;
		break;
	case RK_FORMAT_RGB_565:
	case RK_FORMAT_RGBA_5551:
	case RK_FORMAT_RGBA_4444:
	case RK_FORMAT_YCbCr_422_SP:
	case RK_FORMAT_YCbCr_422_P:
	case RK_FORMAT_YCrCb_422_SP:
	case RK_FORMAT_YCrCb_422_P:
		bits = 16;
		break;
	case RK_FORMAT_YCbCr_420_SP:
	case RK_FORMAT_YCbCr_420_P:
	case RK_FORMAT_YCrCb_420_SP:
	case RK_FORMAT_YCrCb_420_P:
		bits = 12;
		break;
	case RK_FORMAT_YCbCr_420_SP_10B:
	case RK_FORMAT_YCrCb_420_SP_10B:
		bits = 15;
		break;
	default:
		DBG("unknown format\n");
		kfree(one_line);
		return -1;
	}
	temp_data = w * (h - 1) * bits / 8;
	if (fd > 0) {
		DBG("vaddr is %p, bits is %d, fd check\n", vaddr, bits);
		memcpy(one_line, (char *)vaddr + temp_data, w * bits / 8);
		DBG("fd check ok\n");
	} else {
		DBG("vir addr memory check.\n");
		memcpy((void *)((char *)vaddr + temp_data), one_line, w * bits / 8);
		DBG("vir addr check ok.\n");
	}
	kfree(one_line);
	return 0;
}
#endif

static inline void rga_write(u32 b, u32 r)
{
	__raw_writel(b, rga_drvdata->rga_base + r);
}

static inline u32 rga_read(u32 r)
{
	return __raw_readl(rga_drvdata->rga_base + r);
}

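/*
 * Issue a soft reset by writing the self-clearing reset bit in
 * RGA_SYS_CTRL, then poll (up to RGA_RESET_TIMEOUT iterations, 1 us
 * apart) until the hardware clears it again.
 */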
static void rga_soft_reset(void)
{
	u32 i;
	u32 reg;

	rga_write(1, RGA_SYS_CTRL);

	for (i = 0; i < RGA_RESET_TIMEOUT; i++) {
		reg = rga_read(RGA_SYS_CTRL) & 1;

		if (reg == 0)
			break;

		udelay(1);
	}

	if (i == RGA_RESET_TIMEOUT)
		ERR("soft reset timeout.\n");
}

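/*
 * Dump the global running-task count and, for every open session, its
 * task_running counter and any register sets still queued on its
 * waiting/running lists. Used for debugging stuck or leaked jobs.
 */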
static void rga_dump(void)
{
	int running;
	struct rga_reg *reg, *reg_tmp;
	rga_session *session, *session_tmp;

	running = atomic_read(&rga_service.total_running);
	printk("rga total_running %d\n", running);

#if 0
	/* Dump waiting list info */
	if (!list_empty(&rga_service.waiting)) {
		struct list_head *next;
		int num_done;

		next = &rga_service.waiting;

		printk("rga_service dump waiting list\n");

		do {
			reg = list_entry(next->next, struct rga_reg, status_link);
			running = atomic_read(&reg->session->task_running);
			num_done = atomic_read(&reg->session->num_done);
			printk("rga session pid %d, done %d, running %d\n", reg->session->pid, num_done, running);
			next = next->next;
		} while (!list_empty(next));
	}

	/* Dump running list info */
	if (!list_empty(&rga_service.running)) {
		struct list_head *next;
		int num_done;

		printk("rga_service dump running list\n");

		next = &rga_service.running;
		do {
			reg = list_entry(next->next, struct rga_reg, status_link);
			running = atomic_read(&reg->session->task_running);
			num_done = atomic_read(&reg->session->num_done);
			printk("rga session pid %d, done %d, running %d:\n", reg->session->pid, num_done, running);
			next = next->next;
		} while (!list_empty(next));
	}
#endif

	list_for_each_entry_safe(session, session_tmp, &rga_service.session, list_session) {
		printk("session pid %d:\n", session->pid);
		running = atomic_read(&session->task_running);
		printk("task_running %d\n", running);
		list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link) {
			printk("waiting register set 0x%lx\n", (unsigned long)reg);
		}
		list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link) {
			printk("running register set 0x%lx\n", (unsigned long)reg);
		}
	}
}

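/*
 * Schedule the delayed power-off. system_nrt_wq was dropped from newer
 * kernels (ordinary workqueues have been non-reentrant since 3.7), so
 * the >= 4.4 branch queues on plain system_wq instead.
 */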
static inline void rga_queue_power_off_work(void)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
	queue_delayed_work(system_wq, &rga_drvdata->power_off_work, RGA_POWER_OFF_DELAY);
#else
	queue_delayed_work(system_nrt_wq, &rga_drvdata->power_off_work, RGA_POWER_OFF_DELAY);
#endif
}

/* Caller must hold rga_service.lock */
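/*
 * Re-arm the delayed power-off at most once per second, then, if the
 * block is currently off, enable the clocks (plus the power domain or
 * a runtime-PM reference, depending on kernel version) and take the
 * wake lock.
 */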
static void rga_power_on(void)
{
	static ktime_t last;
	ktime_t now = ktime_get();

	if (ktime_to_ns(ktime_sub(now, last)) > NSEC_PER_SEC) {
		cancel_delayed_work_sync(&rga_drvdata->power_off_work);
		rga_queue_power_off_work();
		last = now;
	}
	if (rga_service.enable)
		return;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
	clk_prepare_enable(rga_drvdata->aclk_rga);
	clk_prepare_enable(rga_drvdata->hclk_rga);
	pm_runtime_get_sync(rga_drvdata->dev);
#else
	clk_prepare_enable(rga_drvdata->aclk_rga);
	clk_prepare_enable(rga_drvdata->hclk_rga);
	if (rga_drvdata->pd_rga)
		clk_prepare_enable(rga_drvdata->pd_rga);
#endif

	wake_lock(&rga_drvdata->wake_lock);
	rga_service.enable = true;
}

/* Caller must hold rga_service.lock */
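/*
 * Counterpart of rga_power_on(). If jobs are still marked running this
 * only logs, waits 50 ms and dumps state before powering down anyway.
 */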
static void rga_power_off(void)
{
	int total_running;

	if (!rga_service.enable)
		return;

	total_running = atomic_read(&rga_service.total_running);
	if (total_running) {
		pr_err("power off when %d tasks running!!\n", total_running);
		mdelay(50);
		pr_err("delay 50 ms for running tasks\n");
		rga_dump();
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
	pm_runtime_put(rga_drvdata->dev);
	clk_disable_unprepare(rga_drvdata->aclk_rga);
	clk_disable_unprepare(rga_drvdata->hclk_rga);
#else
	if (rga_drvdata->pd_rga)
		clk_disable_unprepare(rga_drvdata->pd_rga);
	clk_disable_unprepare(rga_drvdata->aclk_rga);
	clk_disable_unprepare(rga_drvdata->hclk_rga);
#endif
	wake_unlock(&rga_drvdata->wake_lock);
	rga_service.enable = false;
}

static void rga_power_off_work(struct work_struct *work)
{
	if (mutex_trylock(&rga_service.lock)) {
		rga_power_off();
		mutex_unlock(&rga_service.lock);
	} else {
		/* Come back later if the device is busy... */
		rga_queue_power_off_work();
	}
}

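/*
 * Block until the session's queued jobs complete. On timeout the
 * running list is torn down, the hardware is reset and -ETIMEDOUT is
 * returned; a negative wait result tears the running list down as
 * well.
 */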
static int rga_flush(rga_session *session, unsigned long arg)
{
	int ret = 0;
	int ret_timeout;

#if RGA_TEST_FLUSH_TIME
	ktime_t start;
	ktime_t end;

	start = ktime_get();
#endif

	ret_timeout = wait_event_timeout(session->wait, atomic_read(&session->done), RGA_TIMEOUT_DELAY);

	if (unlikely(ret_timeout < 0)) {
		//pr_err("flush pid %d wait task ret %d\n", session->pid, ret);
		mutex_lock(&rga_service.lock);
		rga_del_running_list();
		mutex_unlock(&rga_service.lock);
		ret = ret_timeout;
	} else if (0 == ret_timeout) {
		//pr_err("flush pid %d wait %d task done timeout\n", session->pid, atomic_read(&session->task_running));
		//printk("bus = %.8x\n", rga_read(RGA_INT));
		mutex_lock(&rga_service.lock);
		rga_del_running_list_timeout();
		rga_try_set_reg();
		mutex_unlock(&rga_service.lock);
		ret = -ETIMEDOUT;
	}

#if RGA_TEST_FLUSH_TIME
	end = ktime_get();
	end = ktime_sub(end, start);
	printk("one flush wait time %d\n", (int)ktime_to_us(end));
#endif

	return ret;
}


static int rga_get_result(rga_session *session, unsigned long arg)
{
	//printk("rga_get_result %d\n", rga_drvdata->rga_result);
	int ret = 0;
	int num_done;

	num_done = atomic_read(&session->num_done);

	if (unlikely(copy_to_user((void __user *)arg, &num_done, sizeof(int)))) {
		printk("copy_to_user failed\n");
		ret = -EFAULT;
	}
	return ret;
}


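/*
 * Validate request geometry. Sources are capped at 8191x8191, the
 * destination at 2048x2048 active / 4096x2048 virtual, and the active
 * width must fit inside the virtual stride (except for the dst in
 * rotate mode 1).
 */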
static int rga_check_param(const struct rga_req *req)
{
	/*
	 * RGA can support up to 8192*8192 resolution in RGB format,
	 * but we limit the image size to 8191*8191 here.
	 */
	/* check src width and height */
	if (!((req->render_mode == color_fill_mode) || (req->render_mode == line_point_drawing_mode))) {
		if (unlikely((req->src.act_w <= 0) || (req->src.act_w > 8191) || (req->src.act_h <= 0) || (req->src.act_h > 8191))) {
			printk("invalid source resolution act_w = %d, act_h = %d\n", req->src.act_w, req->src.act_h);
			return -EINVAL;
		}
	}

	if (!((req->render_mode == color_fill_mode) || (req->render_mode == line_point_drawing_mode))) {
		if (unlikely((req->src.vir_w <= 0) || (req->src.vir_w > 8191) || (req->src.vir_h <= 0) || (req->src.vir_h > 8191))) {
			printk("invalid source resolution vir_w = %d, vir_h = %d\n", req->src.vir_w, req->src.vir_h);
			return -EINVAL;
		}
	}

	/* check dst width and height */
	if (unlikely((req->dst.act_w <= 0) || (req->dst.act_w > 2048) || (req->dst.act_h <= 0) || (req->dst.act_h > 2048))) {
		printk("invalid destination resolution act_w = %d, act_h = %d\n", req->dst.act_w, req->dst.act_h);
		return -EINVAL;
	}

	if (unlikely((req->dst.vir_w <= 0) || (req->dst.vir_w > 4096) || (req->dst.vir_h <= 0) || (req->dst.vir_h > 2048))) {
		printk("invalid destination resolution vir_w = %d, vir_h = %d\n", req->dst.vir_w, req->dst.vir_h);
		return -EINVAL;
	}

	/* check src_vir_w */
	if (unlikely(req->src.vir_w < req->src.act_w)) {
		printk("invalid src_vir_w act_w = %d, vir_w = %d\n", req->src.act_w, req->src.vir_w);
		return -EINVAL;
	}

	/* check dst_vir_w */
	if (unlikely(req->dst.vir_w < req->dst.act_w)) {
		if (req->rotate_mode != 1) {
			printk("invalid dst_vir_w act_w = %d, vir_w = %d\n", req->dst.act_w, req->dst.vir_w);
			return -EINVAL;
		}
	}

	return 0;
}

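/*
 * Copy one 32-word command-register set into slot 'offset' of the
 * shared command buffer and bump the per-session and global task
 * counters.
 */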
static void rga_copy_reg(struct rga_reg *reg, uint32_t offset)
{
	uint32_t i;
	uint32_t *cmd_buf;
	uint32_t *reg_p;

	if (atomic_read(&reg->session->task_running) != 0)
		printk(KERN_ERR "task_running is not zero\n");

	atomic_add(1, &rga_service.cmd_num);
	atomic_add(1, &reg->session->task_running);

	cmd_buf = (uint32_t *)rga_service.cmd_buff + offset * 32;
	reg_p = (uint32_t *)reg->cmd_reg;

	for (i = 0; i < 32; i++)
		cmd_buf[i] = reg_p[i];
}

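/*
 * Allocate and populate a register set for one request: set up MMU
 * info when enabled, generate the command registers, then queue the
 * set on both the service and session waiting lists.
 */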
static struct rga_reg *rga_reg_init(rga_session *session, struct rga_req *req)
{
	int32_t ret;
	struct rga_reg *reg = kzalloc(sizeof(struct rga_reg), GFP_KERNEL);

	if (NULL == reg) {
		pr_err("kzalloc fail in rga_reg_init\n");
		return NULL;
	}

	reg->session = session;
	INIT_LIST_HEAD(&reg->session_link);
	INIT_LIST_HEAD(&reg->status_link);

	reg->MMU_base = NULL;

	if (req->mmu_info.mmu_en) {
		ret = rga_set_mmu_info(reg, req);
		if (ret < 0) {
			printk("%s, [%d] set mmu info error\n", __FUNCTION__, __LINE__);
			kfree(reg);
			return NULL;
		}
	}

	if (RGA_gen_reg_info(req, (uint8_t *)reg->cmd_reg) == -1) {
		printk("gen reg info error\n");
		kfree(reg);
		return NULL;
	}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
	reg->sg_src = req->sg_src;
	reg->sg_dst = req->sg_dst;
	reg->attach_src = req->attach_src;
	reg->attach_dst = req->attach_dst;
#endif

	mutex_lock(&rga_service.lock);
	list_add_tail(&reg->status_link, &rga_service.waiting);
	list_add_tail(&reg->session_link, &session->waiting);
	mutex_unlock(&rga_service.lock);

	return reg;
}

/* Caller must hold rga_service.lock */
static void rga_reg_deinit(struct rga_reg *reg)
{
	list_del_init(&reg->session_link);
	list_del_init(&reg->status_link);
	kfree(reg);
}

/* Caller must hold rga_service.lock */
static void rga_reg_from_wait_to_run(struct rga_reg *reg)
{
	list_del_init(&reg->status_link);
	list_add_tail(&reg->status_link, &rga_service.running);

	list_del_init(&reg->session_link);
	list_add_tail(&reg->session_link, &reg->session->running);
}

/* Caller must hold rga_service.lock */
static void rga_service_session_clear(rga_session *session)
{
	struct rga_reg *reg, *n;

	list_for_each_entry_safe(reg, n, &session->waiting, session_link) {
		rga_reg_deinit(reg);
	}

	list_for_each_entry_safe(reg, n, &session->running, session_link) {
		rga_reg_deinit(reg);
	}
}

/* Caller must hold rga_service.lock */
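/*
 * If the hardware is idle and work is waiting, power on, copy the next
 * register set into the command buffer, flush it for DMA, reset the
 * core and kick off processing in master mode with the completion and
 * error interrupts enabled.
 */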
static void rga_try_set_reg(void)
{
	struct rga_reg *reg;

	if (list_empty(&rga_service.running)) {
		if (!list_empty(&rga_service.waiting)) {
			/* RGA is idle */
			reg = list_entry(rga_service.waiting.next, struct rga_reg, status_link);

			rga_power_on();
			udelay(1);

			rga_copy_reg(reg, 0);
			rga_reg_from_wait_to_run(reg);
			rga_dma_flush_range(&rga_service.cmd_buff[0], &rga_service.cmd_buff[32]);

			rga_soft_reset();

			rga_write(0x0, RGA_SYS_CTRL);
			rga_write(0, RGA_MMU_CTRL);

			/* CMD buff */
			rga_write(virt_to_phys(rga_service.cmd_buff), RGA_CMD_ADDR);

#if RGA_DEBUGFS
			if (RGA_TEST_REG) {
				//printk(KERN_DEBUG "cmd_addr = %.8x\n", rga_read(RGA_CMD_ADDR));
				uint32_t i;
				uint32_t *p;

				p = rga_service.cmd_buff;
				printk("CMD_REG\n");
				for (i = 0; i < 7; i++)
					printk("%.8x %.8x %.8x %.8x\n", p[0 + i * 4], p[1 + i * 4], p[2 + i * 4], p[3 + i * 4]);
				printk("%.8x %.8x\n", p[0 + i * 4], p[1 + i * 4]);
			}
#endif

			/* master mode */
			rga_write((0x1 << 2) | (0x1 << 3), RGA_SYS_CTRL);

			/* All CMD finish int */
			rga_write(rga_read(RGA_INT) | (0x1 << 10) | (0x1 << 9) | (0x1 << 8), RGA_INT);

#if RGA_DEBUGFS
			if (RGA_TEST_TIME)
				rga_start = ktime_get();
#endif

			/* Start proc */
			atomic_set(&reg->session->done, 0);
			rga_write(0x1, RGA_CMD_CTRL);

#if RGA_DEBUGFS
			if (RGA_TEST_REG) {
				uint32_t i;

				printk("CMD_READ_BACK_REG\n");
				for (i = 0; i < 7; i++)
					printk("%.8x %.8x %.8x %.8x\n", rga_read(0x100 + i * 16 + 0),
					       rga_read(0x100 + i * 16 + 4), rga_read(0x100 + i * 16 + 8), rga_read(0x100 + i * 16 + 12));
				printk("%.8x %.8x\n", rga_read(0x100 + i * 16 + 0), rga_read(0x100 + i * 16 + 4));
			}
#endif
		}
	}
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
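/*
 * Undo rga_get_dma_buf(): unmap and detach the src and dst dma-buf
 * attachments and drop the buffer references. The attachments come
 * from 'req' when 'reg' is NULL, otherwise from 'reg'.
 */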
static int rga_put_dma_buf(struct rga_req *req, struct rga_reg *reg)
{
	struct dma_buf_attachment *attach = NULL;
	struct sg_table *sgt = NULL;
	struct dma_buf *dma_buf = NULL;

	if (!req && !reg)
		return -EINVAL;

	attach = (!reg) ? req->attach_src : reg->attach_src;
	sgt = (!reg) ? req->sg_src : reg->sg_src;
	if (attach && sgt)
		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	if (attach) {
		dma_buf = attach->dmabuf;
		dma_buf_detach(dma_buf, attach);
		dma_buf_put(dma_buf);
	}

	attach = (!reg) ? req->attach_dst : reg->attach_dst;
	sgt = (!reg) ? req->sg_dst : reg->sg_dst;
	if (attach && sgt)
		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	if (attach) {
		dma_buf = attach->dmabuf;
		dma_buf_detach(dma_buf, attach);
		dma_buf_put(dma_buf);
	}

	return 0;
}
#endif
/* Caller must hold rga_service.lock */
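/*
 * Retire every entry on the running list: recycle its slice of the
 * MMU page-table buffer by advancing rga_mmu_buf.back (wrapping across
 * the double-sized area), drop the dma-buf references, decrement the
 * task counters and wake the owning session once its waiting list is
 * empty.
 */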
static void rga_del_running_list(void)
{
	struct rga_reg *reg;

	while (!list_empty(&rga_service.running)) {
		reg = list_entry(rga_service.running.next, struct rga_reg, status_link);

		if (reg->MMU_len != 0) {
			if (rga_mmu_buf.back + reg->MMU_len > 2 * rga_mmu_buf.size)
				rga_mmu_buf.back = reg->MMU_len + rga_mmu_buf.size;
			else
				rga_mmu_buf.back += reg->MMU_len;
		}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
		rga_put_dma_buf(NULL, reg);
#endif

		atomic_sub(1, &reg->session->task_running);
		atomic_sub(1, &rga_service.total_running);

		if (list_empty(&reg->session->waiting)) {
			atomic_set(&reg->session->done, 1);
			wake_up(&reg->session->wait);
		}

		rga_reg_deinit(reg);
	}
}

/* Caller must hold rga_service.lock */
static void rga_del_running_list_timeout(void)
{
	struct rga_reg *reg;

	while (!list_empty(&rga_service.running)) {
		reg = list_entry(rga_service.running.next, struct rga_reg, status_link);

		if (reg->MMU_len != 0) {
			if (rga_mmu_buf.back + reg->MMU_len > 2 * rga_mmu_buf.size)
				rga_mmu_buf.back = reg->MMU_len + rga_mmu_buf.size;
			else
				rga_mmu_buf.back += reg->MMU_len;
		}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
		rga_put_dma_buf(NULL, reg);
#endif
		atomic_sub(1, &reg->session->task_running);
		atomic_sub(1, &rga_service.total_running);

		//printk("RGA soft reset for timeout process\n");
		rga_soft_reset();

#if 0
		printk("RGA_INT is %.8x\n", rga_read(RGA_INT));
		printk("reg->session->task_running = %d\n", atomic_read(&reg->session->task_running));
		printk("rga_service.total_running = %d\n", atomic_read(&rga_service.total_running));

		print_info(&reg->req);

		{
			uint32_t *p, i;

			p = reg->cmd_reg;
			for (i = 0; i < 7; i++)
				printk("%.8x %.8x %.8x %.8x\n", p[0 + i * 4], p[1 + i * 4], p[2 + i * 4], p[3 + i * 4]);
		}
#endif

		if (list_empty(&reg->session->waiting)) {
			atomic_set(&reg->session->done, 1);
			wake_up(&reg->session->wait);
		}

		rga_reg_deinit(reg);
	}
}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
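/*
 * Legacy ION path (pre-4.4 kernels): resolve the fds passed in
 * yrgb_addr into either an sg table (IOMMU case) or a physical
 * address, then derive the uv/v plane addresses from the virtual
 * stride. When no fd is given, yrgb_addr is rebuilt from the user
 * virtual address handed over in uv_addr.
 */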
static int rga_convert_dma_buf(struct rga_req *req)
{
	struct ion_handle *hdl;
	ion_phys_addr_t phy_addr;
	size_t len;
	int ret;
	u32 src_offset, dst_offset;
	void *vaddr;

	req->sg_src = NULL;
	req->sg_dst = NULL;

	src_offset = req->line_draw_info.flag;
	dst_offset = req->line_draw_info.line_width;

	if (req->src.yrgb_addr) {
		hdl = ion_import_dma_buf(rga_drvdata->ion_client, req->src.yrgb_addr);
		if (IS_ERR(hdl)) {
			ret = PTR_ERR(hdl);
			pr_err("RGA ERROR ion buf handle\n");
			return ret;
		}

		if (req->src.uv_addr) {
			if (RGA_TEST_MSG)
				pr_err("WARNING : don't input viraddrs when already input fd !\n");
			req->src.uv_addr = 0;
		}

#if RGA_DEBUGFS
		if (RGA_CHECK_MODE) {
			vaddr = ion_map_kernel(rga_drvdata->ion_client, hdl);
			if (vaddr)
				rga_memory_check(vaddr, req->src.vir_h, req->src.vir_w,
						 req->src.format, req->src.yrgb_addr);
			ion_unmap_kernel(rga_drvdata->ion_client, hdl);
		}
#endif
		if ((req->mmu_info.mmu_flag >> 8) & 1) {
			req->sg_src = ion_sg_table(rga_drvdata->ion_client, hdl);
			req->src.yrgb_addr = req->src.uv_addr;
			req->src.uv_addr = req->src.yrgb_addr + (req->src.vir_w * req->src.vir_h);
			req->src.v_addr = req->src.uv_addr + (req->src.vir_w * req->src.vir_h) / 4;
		} else {
			ion_phys(rga_drvdata->ion_client, hdl, &phy_addr, &len);
			req->src.yrgb_addr = phy_addr + src_offset;
			req->src.uv_addr = req->src.yrgb_addr + (req->src.vir_w * req->src.vir_h);
			req->src.v_addr = req->src.uv_addr + (req->src.vir_w * req->src.vir_h) / 4;
		}
		ion_free(rga_drvdata->ion_client, hdl);
	} else {
		req->src.yrgb_addr = req->src.uv_addr;
		req->src.uv_addr = req->src.yrgb_addr + (req->src.vir_w * req->src.vir_h);
		req->src.v_addr = req->src.uv_addr + (req->src.vir_w * req->src.vir_h) / 4;
	}

	if (req->dst.yrgb_addr) {
		hdl = ion_import_dma_buf(rga_drvdata->ion_client, req->dst.yrgb_addr);
		if (IS_ERR(hdl)) {
			ret = PTR_ERR(hdl);
			printk("RGA ERROR ion buf handle\n");
			return ret;
		}

		if (req->dst.uv_addr) {
			if (RGA_TEST_MSG)
				pr_err("WARNING : don't input viraddrs when already input fd !\n");
			req->dst.uv_addr = 0;
		}

#if RGA_DEBUGFS
		if (RGA_CHECK_MODE) {
			vaddr = ion_map_kernel(rga_drvdata->ion_client, hdl);
			if (vaddr)
				rga_memory_check(vaddr, req->src.vir_h, req->src.vir_w,
						 req->src.format, req->src.yrgb_addr);
			ion_unmap_kernel(rga_drvdata->ion_client, hdl);
		}
#endif
		if ((req->mmu_info.mmu_flag >> 10) & 1) {
			req->sg_dst = ion_sg_table(rga_drvdata->ion_client, hdl);
			req->dst.yrgb_addr = req->dst.uv_addr;
			req->dst.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);
			req->dst.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h) / 4;
		} else {
			ion_phys(rga_drvdata->ion_client, hdl, &phy_addr, &len);
			req->dst.yrgb_addr = phy_addr + dst_offset;
			req->dst.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);
			req->dst.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h) / 4;
		}
		ion_free(rga_drvdata->ion_client, hdl);
	} else {
		req->dst.yrgb_addr = req->dst.uv_addr;
		req->dst.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);
		req->dst.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h) / 4;
	}

	return 0;
}
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
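/*
 * dma-buf path (4.4+ kernels): attach and map the fd carried in
 * img->yrgb_addr, hand the sg table back through *psgt, and derive the
 * per-plane addresses from the user virtual address passed in uv_addr.
 * Buffers without an fd fall through to the virtual-address layout;
 * non-IOMMU (physical) mappings are rejected here.
 */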
static int rga_get_img_info(rga_img_info_t *img,
			    u8 mmu_flag,
			    struct sg_table **psgt,
			    struct dma_buf_attachment **pattach)
{
	struct dma_buf_attachment *attach = NULL;
	struct device *rga_dev = NULL;
	struct sg_table *sgt = NULL;
	struct dma_buf *dma_buf = NULL;
	u32 vir_w, vir_h;
	int yrgb_addr = -1;
	int ret = 0;
	void *vaddr = NULL;

	rga_dev = rga_drvdata->dev;
	yrgb_addr = (int)img->yrgb_addr;
	vir_w = img->vir_w;
	vir_h = img->vir_h;

	if (yrgb_addr > 0) {
		dma_buf = dma_buf_get(img->yrgb_addr);
		if (IS_ERR(dma_buf)) {
			ret = -EINVAL;
			pr_err("dma_buf_get fail fd[%d]\n", yrgb_addr);
			return ret;
		}

		attach = dma_buf_attach(dma_buf, rga_dev);
		if (IS_ERR(attach)) {
			dma_buf_put(dma_buf);
			ret = -EINVAL;
			pr_err("Failed to attach dma_buf\n");
			return ret;
		}
#if RGA_DEBUGFS
		if (RGA_CHECK_MODE) {
			vaddr = dma_buf_vmap(dma_buf);
			if (vaddr)
				rga_memory_check(vaddr, img->vir_w, img->vir_h,
						 img->format, img->yrgb_addr);
			dma_buf_vunmap(dma_buf, vaddr);
		}
#endif
		*pattach = attach;
		sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
		if (IS_ERR(sgt)) {
			ret = -EINVAL;
			pr_err("Failed to map src attachment\n");
			sgt = NULL;
			goto err_get_sg;
		}
		if (!mmu_flag) {
			ret = -EINVAL;
			pr_err("Fix it please enable iommu flag\n");
			goto err_get_sg;
		}

		*psgt = sgt;
		img->yrgb_addr = img->uv_addr;
		img->uv_addr = img->yrgb_addr + (vir_w * vir_h);
		img->v_addr = img->uv_addr + (vir_w * vir_h) / 4;
	} else {
		img->yrgb_addr = img->uv_addr;
		img->uv_addr = img->yrgb_addr + (vir_w * vir_h);
		img->v_addr = img->uv_addr + (vir_w * vir_h) / 4;
	}

	return ret;

err_get_sg:
	if (sgt)
		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	if (attach) {
		dma_buf = attach->dmabuf;
		dma_buf_detach(dma_buf, attach);
		*pattach = NULL;
		dma_buf_put(dma_buf);
	}
	return ret;
}

static int rga_get_dma_buf(struct rga_req *req)
{
	struct dma_buf *dma_buf = NULL;
	u8 mmu_flag = 0;
	int ret = 0;

	req->sg_src = NULL;
	req->sg_dst = NULL;
	req->attach_src = NULL;
	req->attach_dst = NULL;
	mmu_flag = (req->mmu_info.mmu_flag >> 8) & 1;
	ret = rga_get_img_info(&req->src, mmu_flag, &req->sg_src,
			       &req->attach_src);
	if (ret) {
		pr_err("src:rga_get_img_info fail\n");
		goto err_src;
	}

	mmu_flag = (req->mmu_info.mmu_flag >> 10) & 1;
	ret = rga_get_img_info(&req->dst, mmu_flag, &req->sg_dst,
			       &req->attach_dst);
	if (ret) {
		pr_err("dst:rga_get_img_info fail\n");
		goto err_dst;
	}

	return ret;

err_dst:
	if (req->sg_src && req->attach_src) {
		dma_buf_unmap_attachment(req->attach_src,
					 req->sg_src, DMA_BIDIRECTIONAL);
		dma_buf = req->attach_src->dmabuf;
		dma_buf_detach(dma_buf, req->attach_src);
		dma_buf_put(dma_buf);
	}
err_src:

	return ret;
}
#endif
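/*
 * rga_reg_init_2() - build and queue the pair of command buffers used by the
 * two-pass pre-scale path (see rga_blit): pass 0 reads the source, pass 1
 * writes the destination. Returns the second reg so the caller can track
 * completion of the whole pair, or NULL on error.
 */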
static struct rga_reg *rga_reg_init_2(rga_session *session, struct rga_req *req0,
				      struct rga_req *req1)
{
	int32_t ret;
	struct rga_reg *reg0, *reg1;

	reg0 = NULL;
	reg1 = NULL;

	do {
		reg0 = kzalloc(sizeof(*reg0), GFP_KERNEL);
		if (!reg0) {
			pr_err("%s [%d] kzalloc fail in rga_reg_init_2\n",
			       __func__, __LINE__);
			break;
		}

		reg1 = kzalloc(sizeof(*reg1), GFP_KERNEL);
		if (!reg1) {
			pr_err("%s [%d] kzalloc fail in rga_reg_init_2\n",
			       __func__, __LINE__);
			break;
		}

		reg0->session = session;
		INIT_LIST_HEAD(&reg0->session_link);
		INIT_LIST_HEAD(&reg0->status_link);

		reg1->session = session;
		INIT_LIST_HEAD(&reg1->session_link);
		INIT_LIST_HEAD(&reg1->status_link);

		req0->mmu_info.mmu_flag &= ~(1 << 10);
		if (req0->mmu_info.mmu_en) {
			ret = rga_set_mmu_info(reg0, req0);
			if (ret < 0) {
				pr_err("%s, [%d] set mmu info error\n",
				       __func__, __LINE__);
				break;
			}
		}

		RGA_gen_reg_info(req0, (uint8_t *)reg0->cmd_reg);
		req1->mmu_info.mmu_flag &= ~(1 << 8);
		if (req1->mmu_info.mmu_en) {
			ret = rga_set_mmu_info(reg1, req1);
			if (ret < 0) {
				pr_err("%s, [%d] set mmu info error\n",
				       __func__, __LINE__);
				break;
			}
		}
		RGA_gen_reg_info(req1, (uint8_t *)reg1->cmd_reg);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
		reg1->sg_src = req1->sg_src;
		reg1->sg_dst = req1->sg_dst;
		reg1->attach_src = req1->attach_src;
		reg1->attach_dst = req1->attach_dst;
#endif

		mutex_lock(&rga_service.lock);
		list_add_tail(&reg0->status_link, &rga_service.waiting);
		list_add_tail(&reg0->session_link, &session->waiting);
		list_add_tail(&reg1->status_link, &rga_service.waiting);
		list_add_tail(&reg1->session_link, &session->waiting);
		mutex_unlock(&rga_service.lock);

		return reg1;
	} while (0);

	/* kfree(NULL) is a no-op, so no guards are needed */
	kfree(reg0);
	kfree(reg1);
	return NULL;
}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
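/*
 * rga_mem_addr_sel() - legacy (pre-4.4) fast path: when every address of a
 * YCbCr 4:2:0 SP job already lies above 0xc0000000, translate it down by
 * 0x60000000 and bypass the MMU. The 0x60000000 offset is a
 * platform-specific mapping assumption inherited from the original code.
 */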
static void rga_mem_addr_sel(struct rga_req *req)
{
	switch (req->src.format) {
	case RK_FORMAT_YCbCr_420_SP:
		if ((req->src.yrgb_addr > 0xc0000000) &&
		    (req->src.uv_addr > 0xc0000000) &&
		    (req->dst.yrgb_addr > 0xc0000000)) {
			req->src.yrgb_addr = req->src.yrgb_addr - 0x60000000;
			req->src.uv_addr = req->src.uv_addr - 0x60000000;
			req->dst.yrgb_addr = req->dst.yrgb_addr - 0x60000000;
			req->mmu_info.mmu_en = 0;
			req->mmu_info.mmu_flag &= 0xfffe;
		}
		break;
	case RK_FORMAT_YCbCr_422_SP:
	case RK_FORMAT_YCbCr_422_P:
	case RK_FORMAT_YCbCr_420_P:
	case RK_FORMAT_YCrCb_422_SP:
	case RK_FORMAT_YCrCb_422_P:
	case RK_FORMAT_YCrCb_420_SP:
	case RK_FORMAT_YCrCb_420_P:
	default:
		break;
	}
}
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
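/*
 * rga_blit() - validate a request and queue it on the hardware. Bitblt jobs
 * that shrink either axis to half size or less are split into two passes
 * through the pre-scale buffer; everything else is queued as a single
 * command. Imported dma-bufs are released again on any failure.
 */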
static int rga_blit(rga_session *session, struct rga_req *req)
{
	int ret = -1;
	int num = 0;
	struct rga_reg *reg;
	struct rga_req req2;

	uint32_t saw, sah, daw, dah;

	saw = req->src.act_w;
	sah = req->src.act_h;
	daw = req->dst.act_w;
	dah = req->dst.act_h;

#if RGA_DEBUGFS
	if (RGA_TEST_MSG)
		print_debug_info(req);
	if (RGA_CHECK_MODE) {
		rga_align_check(req);
		/*rga_scale_check(req);*/
	}
#endif
	if (rga_get_dma_buf(req)) {
		pr_err("RGA : DMA buf copy error\n");
		return -EFAULT;
	}
	req->render_mode &= ~RGA_BUF_GEM_TYPE_MASK;
	do {
		if ((req->render_mode == bitblt_mode) &&
		    (((saw >> 1) >= daw) || ((sah >> 1) >= dah))) {
			/* generate 2 cmd for pre scale */
			if (((saw >> 3) > daw) || ((sah >> 3) > dah)) {
				pr_err("downscaling below 1/8 is unsupported\n");
				goto err_put_dma_buf;
			}
			/* was "(dah >> 3) > daw", an apparent typo: both axes
			 * should compare destination against source
			 */
			if (((daw >> 3) > saw) || ((dah >> 3) > sah)) {
				pr_err("upscaling above 8x is unsupported\n");
				goto err_put_dma_buf;
			}
			ret = rga_check_param(req);
			if (ret == -EINVAL) {
				pr_err("req 0 argument is invalid\n");
				goto err_put_dma_buf;
			}

			ret = RGA_gen_two_pro(req, &req2);
			if (ret == -EINVAL) {
				pr_err("RGA_gen_two_pro err\n");
				goto err_put_dma_buf;
			}

			ret = rga_check_param(req);
			if (ret == -EINVAL) {
				pr_err("req 1 argument is invalid\n");
				goto err_put_dma_buf;
			}

			ret = rga_check_param(&req2);
			if (ret == -EINVAL) {
				pr_err("req 2 argument is invalid\n");
				goto err_put_dma_buf;
			}

			reg = rga_reg_init_2(session, req, &req2);
			if (!reg) {
				pr_err("init2 reg fail\n");
				goto err_put_dma_buf;
			}
			num = 2;
		} else {
			/* check whether the values are legal */
			ret = rga_check_param(req);
			if (ret == -EINVAL) {
				pr_err("req argument is invalid\n");
				goto err_put_dma_buf;
			}

			reg = rga_reg_init(session, req);
			if (!reg) {
				pr_err("init reg fail\n");
				goto err_put_dma_buf;
			}

			num = 1;
		}

		mutex_lock(&rga_service.lock);
		atomic_add(num, &rga_service.total_running);
		rga_try_set_reg();
		mutex_unlock(&rga_service.lock);
		return 0;

	} while (0);

err_put_dma_buf:
	rga_put_dma_buf(req, NULL);

	return -EFAULT;
}
#else
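/* Pre-4.4 variant: buffers come in through rga_convert_dma_buf() (ion), and
 * physical addresses may still be remapped by rga_mem_addr_sel(). */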
static int rga_blit(rga_session *session, struct rga_req *req)
{
	int ret = -1;
	int num = 0;
	struct rga_reg *reg;
	struct rga_req req2;
	uint32_t saw, sah, daw, dah;

	saw = req->src.act_w;
	sah = req->src.act_h;
	daw = req->dst.act_w;
	dah = req->dst.act_h;

#if RGA_DEBUGFS
	if (RGA_TEST_MSG)
		print_debug_info(req);
	if (RGA_CHECK_MODE) {
		rga_align_check(req);
		/*rga_scale_check(req);*/
	}
#endif
	if (rga_convert_dma_buf(req)) {
		pr_err("RGA : DMA buf copy error\n");
		return -EFAULT;
	}
	do {
		if ((req->render_mode == bitblt_mode) &&
		    (((saw >> 1) >= daw) || ((sah >> 1) >= dah))) {
			/* generate 2 cmd for pre scale */
			ret = rga_check_param(req);
			if (ret == -EINVAL) {
				pr_err("req 0 argument is invalid\n");
				break;
			}

			ret = RGA_gen_two_pro(req, &req2);
			if (ret == -EINVAL)
				break;

			ret = rga_check_param(req);
			if (ret == -EINVAL) {
				pr_err("req 1 argument is invalid\n");
				break;
			}

			ret = rga_check_param(&req2);
			if (ret == -EINVAL) {
				pr_err("req 2 argument is invalid\n");
				break;
			}

			reg = rga_reg_init_2(session, req, &req2);
			if (!reg)
				break;
			num = 2;

		} else {
			/* check whether the values are legal */
			ret = rga_check_param(req);
			if (ret == -EINVAL) {
				pr_err("req argument is invalid\n");
				break;
			}

			if (req->render_mode == bitblt_mode)
				rga_mem_addr_sel(req);

			reg = rga_reg_init(session, req);
			if (!reg)
				break;
			num = 1;
		}

		mutex_lock(&rga_service.lock);
		atomic_add(num, &rga_service.total_running);
		rga_try_set_reg();
		mutex_unlock(&rga_service.lock);

		return 0;
	} while (0);

	return -EFAULT;
}
#endif

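/*
 * Async submits and returns immediately; the caller polls completion through
 * RGA_GET_RESULT or RGA_FLUSH. Sync waits up to RGA_TIMEOUT_DELAY and tears
 * the job down if the hardware never signals completion.
 */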
static int rga_blit_async(rga_session *session, struct rga_req *req)
{
	int ret = -1;

#if RGA_DEBUGFS
	if (RGA_TEST_MSG)
		DBG("*** rga_blit_async proc ***\n");
#endif
	atomic_set(&session->done, 0);
	ret = rga_blit(session, req);
	return ret;
}

static int rga_blit_sync(rga_session *session, struct rga_req *req)
{
	int ret = -1;
	int ret_timeout = 0;

#if RGA_DEBUGFS
	if (RGA_TEST_MSG)
		DBG("*** rga_blit_sync proc ***\n");
#endif

	atomic_set(&session->done, 0);
	ret = rga_blit(session, req);
	if (ret < 0)
		return ret;

	ret_timeout = wait_event_timeout(session->wait,
					 atomic_read(&session->done),
					 RGA_TIMEOUT_DELAY);

	if (unlikely(ret_timeout < 0)) {
		mutex_lock(&rga_service.lock);
		rga_del_running_list();
		mutex_unlock(&rga_service.lock);
		ret = ret_timeout;
	} else if (ret_timeout == 0) {
		mutex_lock(&rga_service.lock);
		rga_del_running_list_timeout();
		rga_try_set_reg();
		mutex_unlock(&rga_service.lock);
		ret = -ETIMEDOUT;
	}

#if RGA_DEBUGFS
	if (RGA_TEST_TIME) {
		rga_end = ktime_get();
		rga_end = ktime_sub(rga_end, rga_start);
		DBG("sync one cmd end time %d us\n", (int)ktime_to_us(rga_end));
	}
#endif

	return ret;
}

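/*
 * Userspace entry point. All commands are serialized by rga_service.mutex;
 * RGA_BLIT_ASYNC falls back to the synchronous path when more than 16 jobs
 * are already in flight.
 */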
static long rga_ioctl(struct file *file, uint32_t cmd, unsigned long arg)
{
	struct rga_req req;
	int ret = 0;
	rga_session *session;

	memset(&req, 0x0, sizeof(req));
	mutex_lock(&rga_service.mutex);

	session = (rga_session *)file->private_data;

	if (!session) {
		pr_err("%s [%d] rga thread session is null\n",
		       __func__, __LINE__);
		mutex_unlock(&rga_service.mutex);
		return -EINVAL;
	}

#if RGA_DEBUGFS
	if (RGA_TEST_MSG)
		DBG("cmd is %s(0x%x)\n", rga_get_cmd_mode_str(cmd), cmd);
	if (RGA_NONUSE) {
		mutex_unlock(&rga_service.mutex);
		return 0;
	}
#endif
	switch (cmd) {
	case RGA_BLIT_SYNC:
		if (unlikely(copy_from_user(&req, (struct rga_req *)arg,
					    sizeof(struct rga_req)))) {
			ERR("copy_from_user failed\n");
			ret = -EFAULT;
			break;
		}
		ret = rga_blit_sync(session, &req);
		break;
	case RGA_BLIT_ASYNC:
		if (unlikely(copy_from_user(&req, (struct rga_req *)arg,
					    sizeof(struct rga_req)))) {
			ERR("copy_from_user failed\n");
			ret = -EFAULT;
			break;
		}

		if (atomic_read(&rga_service.total_running) > 16)
			ret = rga_blit_sync(session, &req);
		else
			ret = rga_blit_async(session, &req);
		break;
	case RGA_FLUSH:
		ret = rga_flush(session, arg);
		break;
	case RGA_GET_RESULT:
		ret = rga_get_result(session, arg);
		break;
	case RGA_GET_VERSION:
		if (!rga_drvdata->version) {
			rga_drvdata->version = kzalloc(16, GFP_KERNEL);
			if (!rga_drvdata->version) {
				ret = -ENOMEM;
				break;
			}
			rga_power_on();
			udelay(1);
			if (rga_read(RGA_VERSION) == 0x02018632)
				snprintf(rga_drvdata->version, 16, "1.6");
			else
				snprintf(rga_drvdata->version, 16, "1.003");
		}

		/* copy_to_user() returns the number of bytes left uncopied */
		if (copy_to_user((void *)arg, rga_drvdata->version, 16))
			ret = -EFAULT;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&rga_service.mutex);

	return ret;
}

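/*
 * In-kernel submission hook: other drivers call rga_ioctl_kernel(), which
 * dispatches through the rga_ioctl_kernel_p function pointer installed at
 * probe time, so callers do not need the character device to be open.
 */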
long rga_ioctl_kernel(struct rga_req *req)
{
	int ret = 0;

	if (!rga_ioctl_kernel_p) {
		pr_err("rga_ioctl_kernel_p is NULL\n");
		return -1;
	}

	ret = (*rga_ioctl_kernel_p)(req);
	return ret;
}

long rga_ioctl_kernel_imp(struct rga_req *req)
{
	int ret = 0;
	rga_session *session;

	mutex_lock(&rga_service.mutex);

	/* rga_session_global has static storage, so no NULL check is needed */
	session = &rga_session_global;

	ret = rga_blit_sync(session, req);

	mutex_unlock(&rga_service.mutex);

	return ret;
}

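/*
 * Character-device lifecycle: each open() gets its own rga_session with
 * private waiting/running lists; release() waits briefly for in-flight
 * work, then unlinks and frees the session.
 */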
static int rga_open(struct inode *inode, struct file *file)
{
	rga_session *session = kzalloc(sizeof(rga_session), GFP_KERNEL);

	if (!session) {
		pr_err("unable to allocate memory for rga_session\n");
		return -ENOMEM;
	}

	session->pid = current->pid;

	INIT_LIST_HEAD(&session->waiting);
	INIT_LIST_HEAD(&session->running);
	INIT_LIST_HEAD(&session->list_session);
	init_waitqueue_head(&session->wait);
	mutex_lock(&rga_service.lock);
	list_add_tail(&session->list_session, &rga_service.session);
	mutex_unlock(&rga_service.lock);
	atomic_set(&session->task_running, 0);
	atomic_set(&session->num_done, 0);

	file->private_data = (void *)session;

	return nonseekable_open(inode, file);
}

static int rga_release(struct inode *inode, struct file *file)
{
	int task_running;
	rga_session *session = (rga_session *)file->private_data;

	if (!session)
		return -EINVAL;

	task_running = atomic_read(&session->task_running);

	if (task_running) {
		pr_err("rga_service session %d still has %d task running when closing\n",
		       session->pid, task_running);
		msleep(100);
	}

	wake_up(&session->wait);
	mutex_lock(&rga_service.lock);
	list_del(&session->list_session);
	rga_service_session_clear(session);
	kfree(session);
	mutex_unlock(&rga_service.lock);

	return 0;
}

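/*
 * Interrupt handling is split: the hard-irq half (rga_irq) soft-resets the
 * block on an error status and acks the interrupt bits, then wakes the
 * threaded half (rga_irq_thread), which retires finished jobs and starts
 * the next one under rga_service.lock.
 */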
static irqreturn_t rga_irq_thread(int irq, void *dev_id)
{
#if RGA_DEBUGFS
	if (RGA_INT_FLAG)
		DBG("irqthread INT[%x], STATS[%x]\n",
		    rga_read(RGA_INT), rga_read(RGA_STATUS));
#endif
	mutex_lock(&rga_service.lock);
	if (rga_service.enable) {
		rga_del_running_list();
		rga_try_set_reg();
	}
	mutex_unlock(&rga_service.lock);

	return IRQ_HANDLED;
}

static irqreturn_t rga_irq(int irq, void *dev_id)
{
#if RGA_DEBUGFS
	if (RGA_INT_FLAG)
		DBG("irq INT[%x], STATS[%x]\n",
		    rga_read(RGA_INT), rga_read(RGA_STATUS));
#endif
	/* on an error interrupt, soft-reset the hardware */
	if (rga_read(RGA_INT) & 0x03) {
		pr_err("Err irq INT[%x], STATS[%x]\n",
		       rga_read(RGA_INT), rga_read(RGA_STATUS));
		rga_soft_reset();
	}
	/* clear INT */
	rga_write(rga_read(RGA_INT) | (0x1 << 6) | (0x1 << 7) |
		  (0x1 << 5) | (0x1 << 4), RGA_INT);

	return IRQ_WAKE_THREAD;
}

struct file_operations rga_fops = {
	.owner = THIS_MODULE,
	.open = rga_open,
	.release = rga_release,
	.unlocked_ioctl = rga_ioctl,
};

static struct miscdevice rga_dev = {
	.minor = RGA_MAJOR,
	.name = "rga",
	.fops = &rga_fops,
};

#if defined(CONFIG_OF)
static const struct of_device_id rockchip_rga_dt_ids[] = {
	{ .compatible = "rockchip,rk312x-rga", },
	{},
};
#endif

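/*
 * Probe order matters: service locks and the kernel-submit hook are set up
 * before the interrupt can fire, the register window and IRQ are acquired
 * with devm_* helpers (so the error paths below only need to undo the wake
 * lock), and the misc device is registered last.
 */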
static int rga_drv_probe(struct platform_device *pdev)
{
	struct rga_drvdata *data;
	struct resource *res;
	int ret = 0;

	mutex_init(&rga_service.lock);
	mutex_init(&rga_service.mutex);
	atomic_set(&rga_service.total_running, 0);
	rga_service.enable = false;

	rga_ioctl_kernel_p = rga_ioctl_kernel_imp;

	data = devm_kzalloc(&pdev->dev, sizeof(struct rga_drvdata), GFP_KERNEL);
	if (!data) {
		ERR("failed to allocate driver data.\n");
		return -ENOMEM;
	}

	INIT_DELAYED_WORK(&data->power_off_work, rga_power_off_work);
	wake_lock_init(&data->wake_lock, WAKE_LOCK_SUSPEND, "rga");

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
	data->pd_rga = devm_clk_get(&pdev->dev, "pd_rga");
	if (IS_ERR(data->pd_rga)) {
		dev_err(&pdev->dev, "Failed to get rga power domain");
		data->pd_rga = NULL;
	}
#endif
	data->aclk_rga = devm_clk_get(&pdev->dev, "aclk_rga");
	data->hclk_rga = devm_clk_get(&pdev->dev, "hclk_rga");

	/* map the registers */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->rga_base = devm_ioremap_resource(&pdev->dev, res);
	/* devm_ioremap_resource() returns an ERR_PTR, never NULL */
	if (IS_ERR(data->rga_base)) {
		ERR("rga ioremap failed\n");
		ret = PTR_ERR(data->rga_base);
		goto err_ioremap;
	}

	/* get the IRQ */
	data->irq = platform_get_irq(pdev, 0);
	if (data->irq <= 0) {
		ERR("failed to get rga irq resource (%d).\n", data->irq);
		/* map a zero return to a proper error code */
		ret = data->irq ? data->irq : -ENXIO;
		goto err_irq;
	}

	/* request the IRQ */
	ret = devm_request_threaded_irq(&pdev->dev, data->irq, rga_irq,
					rga_irq_thread, 0, "rga", data);
	if (ret) {
		ERR("rga request_irq failed (%d).\n", ret);
		goto err_irq;
	}

	platform_set_drvdata(pdev, data);
	data->dev = &pdev->dev;
	rga_drvdata = data;

#if defined(CONFIG_ION_ROCKCHIP)
	data->ion_client = rockchip_ion_client_create("rga");
	if (IS_ERR(data->ion_client)) {
		dev_err(&pdev->dev, "failed to create ion client for rga");
		return PTR_ERR(data->ion_client);
	}
	dev_info(&pdev->dev, "rga ion client create success!\n");
#endif

	ret = misc_register(&rga_dev);
	if (ret) {
		ERR("cannot register miscdev (%d)\n", ret);
		goto err_misc_register;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
	pm_runtime_enable(&pdev->dev);
#endif

	pr_info("Driver loaded successfully\n");

	return 0;

err_misc_register:
err_irq:
err_ioremap:
	/* the IRQ and register mapping are devm-managed and released
	 * automatically; only the wake lock needs explicit cleanup
	 */
	wake_lock_destroy(&data->wake_lock);

	return ret;
}

static int rga_drv_remove(struct platform_device *pdev)
{
	struct rga_drvdata *data = platform_get_drvdata(pdev);

	DBG("%s [%d]\n", __func__, __LINE__);

	wake_lock_destroy(&data->wake_lock);
	/* probe registered the static rga_dev miscdevice, so deregister
	 * that rather than the never-initialized data->miscdev
	 */
	misc_deregister(&rga_dev);
	kfree(data->version);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
	devm_clk_put(&pdev->dev, data->aclk_rga);
	devm_clk_put(&pdev->dev, data->hclk_rga);
	pm_runtime_disable(&pdev->dev);
#else
	if (data->pd_rga)
		devm_clk_put(&pdev->dev, data->pd_rga);
	devm_clk_put(&pdev->dev, data->aclk_rga);
	devm_clk_put(&pdev->dev, data->hclk_rga);
#endif
	/* the IRQ and register mapping were devm-requested and are
	 * released automatically when the device goes away
	 */

	return 0;
}

static struct platform_driver rga_driver = {
	.probe = rga_drv_probe,
	.remove = rga_drv_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = "rga",
		.of_match_table = of_match_ptr(rockchip_rga_dt_ids),
	},
};

#if RGA_DEBUGFS
void rga_slt(void);

static int rga_debug_show(struct seq_file *m, void *data)
{
	seq_puts(m, "echo reg > rga to open rga reg MSG\n");
	seq_puts(m, "echo msg > rga to open rga msg MSG\n");
	seq_puts(m, "echo time > rga to open rga time MSG\n");
	seq_puts(m, "echo check > rga to open rga check flag\n");
	seq_puts(m, "echo int > rga to open rga int flag\n");
	seq_puts(m, "echo stop > rga to stop using hardware\n");
	return 0;
}

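/*
 * Each recognized keyword toggles the matching debug flag, so writing the
 * same word twice returns to the previous state; "slt" instead kicks off
 * the built-in self-test below.
 */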
static ssize_t rga_debug_write(struct file *file, const char __user *ubuf,
			       size_t len, loff_t *offp)
{
	char buf[14];

	/* reject empty writes: buf[len - 1] below would underflow */
	if (len == 0 || len > sizeof(buf) - 1)
		return -EINVAL;
	if (copy_from_user(buf, ubuf, len))
		return -EFAULT;
	buf[len - 1] = '\0';
	if (strncmp(buf, "reg", 4) == 0) {
		if (RGA_TEST_REG) {
			RGA_TEST_REG = 0;
			DBG("close rga reg!\n");
		} else {
			RGA_TEST_REG = 1;
			DBG("open rga reg!\n");
		}
	} else if (strncmp(buf, "msg", 3) == 0) {
		if (RGA_TEST_MSG) {
			RGA_TEST_MSG = 0;
			DBG("close rga test MSG!\n");
		} else {
			RGA_TEST_MSG = 1;
			DBG("open rga test MSG!\n");
		}
	} else if (strncmp(buf, "time", 4) == 0) {
		if (RGA_TEST_TIME) {
			RGA_TEST_TIME = 0;
			DBG("close rga test time!\n");
		} else {
			RGA_TEST_TIME = 1;
			DBG("open rga test time!\n");
		}
	} else if (strncmp(buf, "check", 5) == 0) {
		if (RGA_CHECK_MODE) {
			RGA_CHECK_MODE = 0;
			DBG("close rga check mode!\n");
		} else {
			RGA_CHECK_MODE = 1;
			DBG("open rga check mode!\n");
		}
	} else if (strncmp(buf, "stop", 4) == 0) {
		/* the two messages were previously swapped */
		if (RGA_NONUSE) {
			RGA_NONUSE = 0;
			DBG("use rga hardware!\n");
		} else {
			RGA_NONUSE = 1;
			DBG("stop using rga hardware!\n");
		}
	} else if (strncmp(buf, "int", 3) == 0) {
		if (RGA_INT_FLAG) {
			RGA_INT_FLAG = 0;
			DBG("close rga interrupt msg!\n");
		} else {
			RGA_INT_FLAG = 1;
			DBG("open rga interrupt msg!\n");
		}
	} else if (strncmp(buf, "slt", 3) == 0) {
		rga_slt();
	}
	return len;
}

static int rga_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, rga_debug_show, NULL);
}

static const struct file_operations rga_debug_fops = {
	.owner = THIS_MODULE,
	.open = rga_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = rga_debug_write,
};

static void rga_debugfs_add(void)
{
	struct dentry *rga_debug_root;
	struct dentry *ent;

	rga_debug_root = debugfs_create_dir("rga_debug", NULL);

	ent = debugfs_create_file("rga", 0644, rga_debug_root,
				  NULL, &rga_debug_fops);
	if (!ent) {
		pr_err("create rga_debugfs err\n");
		debugfs_remove_recursive(rga_debug_root);
	}
}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
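/*
 * rga_slt() - built-in self-test: fill a source surface with a known byte
 * pattern, run a 1:1 RGBA8888 blit through the block, then compare source
 * and destination word by word, bailing out after 64 mismatches.
 */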
void rga_slt(void)
{
	struct rga_req req;
	rga_session session;
	void *src_vir, *dst_vir;
	unsigned int *src, *dst;
	ion_phys_addr_t src_phy, dst_phy;
	int i;
	unsigned int srcW, srcH, dstW, dstH;
	struct ion_handle *src_handle;
	struct ion_handle *dst_handle;
	struct rga_drvdata *data;
	size_t srclen, dstlen;	/* ion_phys() expects size_t lengths */
	int err_count = 0;
	int right_count = 0;
	int size;
	unsigned int *pstd;
	unsigned int *pnow;

	data = rga_drvdata;
	srcW = 1280;
	srcH = 720;
	dstW = 1280;
	dstH = 720;
	src_handle = ion_alloc(data->ion_client, (size_t)srcW * srcH * 4, 0,
			       ION_HEAP(ION_CMA_HEAP_ID), 0);

	dst_handle = ion_alloc(data->ion_client, (size_t)dstW * dstH * 4, 0,
			       ION_HEAP(ION_CMA_HEAP_ID), 0);

	session.pid = current->pid;
	INIT_LIST_HEAD(&session.waiting);
	INIT_LIST_HEAD(&session.running);
	INIT_LIST_HEAD(&session.list_session);
	init_waitqueue_head(&session.wait);
	/* no need to protect */
	list_add_tail(&session.list_session, &rga_service.session);
	atomic_set(&session.task_running, 0);
	atomic_set(&session.num_done, 0);

	src_vir = ion_map_kernel(data->ion_client, src_handle);
	dst_vir = ion_map_kernel(data->ion_client, dst_handle);

	ion_phys(data->ion_client, src_handle, &src_phy, &srclen);
	ion_phys(data->ion_client, dst_handle, &dst_phy, &dstlen);

	memset(&req, 0, sizeof(struct rga_req));
	src = (unsigned int *)src_vir;
	dst = (unsigned int *)dst_vir;

	memset(src_vir, 0x80, srcW * srcH * 4);

	DBG("\n********************************\n");
	DBG("************ RGA_TEST ************\n");
	DBG("********************************\n\n");

	req.src.act_w = srcW;
	req.src.act_h = srcH;

	req.src.vir_w = srcW;
	req.src.vir_h = srcH;	/* was srcW, an apparent typo */
	req.src.yrgb_addr = 0;
	req.src.uv_addr = src_phy;
	req.src.v_addr = src_phy + srcH * srcW;
	req.src.format = RK_FORMAT_RGBA_8888;

	req.dst.act_w = dstW;
	req.dst.act_h = dstH;

	req.dst.vir_w = dstW;
	req.dst.vir_h = dstH;
	req.dst.x_offset = 0;
	req.dst.y_offset = 0;

	req.dst.yrgb_addr = 0;
	req.dst.uv_addr = dst_phy;
	req.dst.v_addr = dst_phy + dstH * dstW;

	req.dst.format = RK_FORMAT_RGBA_8888;

	req.clip.xmin = 0;
	req.clip.xmax = dstW - 1;
	req.clip.ymin = 0;
	req.clip.ymax = dstH - 1;

	rga_blit_sync(&session, &req);

	size = dstW * dstH * 4;
	pstd = (unsigned int *)src_vir;
	pnow = (unsigned int *)dst_vir;

	DBG("[ num : srcInfo dstInfo ]\n");
	for (i = 0; i < size / 4; i++) {
		if (*pstd != *pnow) {
			DBG("[X%.8d:0x%x 0x%x]", i, *pstd, *pnow);
			if (i % 4 == 0)
				DBG("\n");
			err_count++;
		} else {
			if (i % (640 * 1024) == 0)
				DBG("[Y%.8d:0x%.8x 0x%.8x]\n", i,
				    *pstd, *pnow);
			right_count++;
		}
		pstd++;
		pnow++;
		if (err_count > 64)
			break;
	}

	DBG("err_count=%d, right_count=%d\n", err_count, right_count);
	if (err_count != 0)
		DBG("rga slt err !!\n");
	else
		DBG("rga slt success !!\n");

	ion_unmap_kernel(data->ion_client, src_handle);
	ion_unmap_kernel(data->ion_client, dst_handle);

	ion_free(data->ion_client, src_handle);
	ion_free(data->ion_client, dst_handle);
}
#else
unsigned long src1_buf[400 * 200];
unsigned long dst1_buf[400 * 200];

void rga_slt(void)
{
	struct rga_req req;
	rga_session session;
	unsigned long *src_vir, *dst_vir;
	int i;
	unsigned int srcW, srcH, dstW, dstH;
	int err_count = 0;
	int right_count = 0;
	int size;
	unsigned int *pstd;
	unsigned int *pnow;

	srcW = 400;
	srcH = 200;
	dstW = 400;
	dstH = 200;

	session.pid = current->pid;
	INIT_LIST_HEAD(&session.waiting);
	INIT_LIST_HEAD(&session.running);
	INIT_LIST_HEAD(&session.list_session);
	init_waitqueue_head(&session.wait);
	/* no need to protect */
	list_add_tail(&session.list_session, &rga_service.session);
	atomic_set(&session.task_running, 0);
	atomic_set(&session.num_done, 0);

	memset(&req, 0, sizeof(struct rga_req));
	src_vir = src1_buf;
	dst_vir = dst1_buf;

	memset(src1_buf, 0x50, 400 * 200 * 4);
	memset(dst1_buf, 0x00, 400 * 200 * 4);

	rga_dma_flush_range(&src1_buf[0], &src1_buf[400 * 200]);

	DBG("\n********************************\n");
	DBG("************ RGA_TEST ************\n");
	DBG("********************************\n\n");

	req.src.act_w = srcW;
	req.src.act_h = srcH;

	req.src.vir_w = srcW;
	req.src.vir_h = srcH;	/* was srcW, an apparent typo */
	req.src.yrgb_addr = 0;
	req.src.uv_addr = (unsigned long)virt_to_phys(src_vir);
	req.src.v_addr = req.src.uv_addr + srcH * srcW;
	req.src.format = RK_FORMAT_RGBA_8888;

	req.dst.act_w = dstW;
	req.dst.act_h = dstH;

	req.dst.vir_w = dstW;
	req.dst.vir_h = dstH;
	req.dst.x_offset = 0;
	req.dst.y_offset = 0;

	req.dst.yrgb_addr = 0;
	req.dst.uv_addr = (unsigned long)virt_to_phys(dst_vir);
	req.dst.v_addr = req.dst.uv_addr + dstH * dstW;

	req.dst.format = RK_FORMAT_RGBA_8888;
	rga_blit_sync(&session, &req);
	size = dstW * dstH * 4;
	pstd = (unsigned int *)src_vir;
	pnow = (unsigned int *)dst_vir;

	DBG("[ num : srcInfo dstInfo ]\n");
	for (i = 0; i < size / 4; i++) {
		if (*pstd != *pnow) {
			DBG("[X%.8d:0x%x 0x%x]", i, *pstd, *pnow);
			if (i % 4 == 0)
				DBG("\n");
			err_count++;
		} else {
			if (i % (640 * 1024) == 0)
				DBG("[Y%.8d:0x%.8x 0x%.8x]\n", i,
				    *pstd, *pnow);
			right_count++;
		}
		pstd++;
		pnow++;
		if (err_count > 64)
			break;
	}

	DBG("err_count=%d, right_count=%d\n", err_count, right_count);
	if (err_count != 0)
		DBG("rga slt err !!\n");
	else
		DBG("rga slt success !!\n");
}
#endif
#endif

void rga_test_0(void);
void rga_test_1(void);

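/*
 * Module init: pre-allocates the page table plus backing pages for the
 * pre-scale intermediate buffer and a 256 KiB MMU command buffer, then
 * registers the platform driver and the global kernel-side session.
 */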
static int __init rga_init(void)
{
	int i, ret;
	void *pre_scale_page_buf;
	uint32_t *pre_scale_page_table;
	uint32_t *mmu_base;
	struct page **pages;

	/* allocate the mmu table for the pre-scale intermediate buffer */
	pre_scale_page_table = kzalloc(RGA_PRE_SCALE_PAGE_SIZE *
				       sizeof(*pre_scale_page_table),
				       GFP_KERNEL);
	if (pre_scale_page_table == NULL) {
		pr_err("RGA alloc pre-scale page table failed.\n");
		return -ENOMEM;
	}

	/* allocate the reserved pre-scale pages */
	for (i = 0; i < RGA_PRE_SCALE_PAGE_SIZE; i++) {
		pre_scale_page_buf = (void *)__get_free_page(GFP_KERNEL |
							     __GFP_ZERO);
		if (pre_scale_page_buf == NULL) {
			pr_err("RGA init pre scale page_table[%d] failed\n", i);
			ret = -ENOMEM;
			goto free_pre_scale_page_table;
		}
		pre_scale_page_table[i] =
			(uint32_t)virt_to_phys(pre_scale_page_buf);
	}

	mmu_base = kmalloc(1024 * 256, GFP_KERNEL);
	if (mmu_base == NULL) {
		pr_err("RGA alloc mmu buffer failed.\n");
		ret = -ENOMEM;
		goto free_pre_scale_page_table;
	}

	pages = kmalloc(32768 * sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL) {
		pr_err("RGA alloc pages buffer failed.\n");
		ret = -ENOMEM;
		goto free_mmu_base;
	}

	ret = platform_driver_register(&rga_driver);
	if (ret != 0) {
		pr_err("Platform device register failed (%d).\n", ret);
		goto free_pages_buf;
	}

	rga_service.pre_scale_buf = pre_scale_page_table;

	rga_mmu_buf.buf_virtual = mmu_base;
#if (defined(CONFIG_ARM) && defined(CONFIG_ARM_LPAE))
	rga_mmu_buf.buf = (uint32_t *)(uint32_t)virt_to_phys((void *)((unsigned long)mmu_base));
#else
	rga_mmu_buf.buf = (uint32_t *)virt_to_phys((void *)((unsigned long)mmu_base));
#endif
	rga_mmu_buf.front = 0;
	rga_mmu_buf.back = 64 * 1024;
	rga_mmu_buf.size = 64 * 1024;

	rga_mmu_buf.pages = pages;

	rga_session_global.pid = 0x0000ffff;
	INIT_LIST_HEAD(&rga_session_global.waiting);
	INIT_LIST_HEAD(&rga_session_global.running);
	INIT_LIST_HEAD(&rga_session_global.list_session);

	INIT_LIST_HEAD(&rga_service.waiting);
	INIT_LIST_HEAD(&rga_service.running);
	INIT_LIST_HEAD(&rga_service.done);
	INIT_LIST_HEAD(&rga_service.session);

	init_waitqueue_head(&rga_session_global.wait);
	list_add_tail(&rga_session_global.list_session, &rga_service.session);
	atomic_set(&rga_session_global.task_running, 0);
	atomic_set(&rga_session_global.num_done, 0);

#if RGA_TEST_CASE
	rga_test_0();
#endif
#if RGA_DEBUGFS
	rga_debugfs_add();
#endif

	INFO("RGA Module initialized.\n");

	return 0;

free_pages_buf:
	kfree(pages);

free_mmu_base:
	kfree(mmu_base);

free_pre_scale_page_table:
	/* the pages came from __get_free_page(), so release with free_page() */
	for (i = 0; i < RGA_PRE_SCALE_PAGE_SIZE; i++)
		if (pre_scale_page_table[i] != 0)
			free_page((unsigned long)phys_to_virt(
				(phys_addr_t)pre_scale_page_table[i]));

	kfree(pre_scale_page_table);

	return ret;
}

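/*
 * Module exit: every page referenced by the pre-scale table was taken from
 * __get_free_page(), so each entry is released individually with free_page()
 * before the table itself is freed.
 */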
static void __exit rga_exit(void)
{
	phys_addr_t pre_scale_buf;
	int i;

	rga_power_off();

	if (rga_service.pre_scale_buf != NULL) {
		/* previously only entry 0 was freed (and with kfree),
		 * leaking the remaining pages; free every entry
		 */
		for (i = 0; i < RGA_PRE_SCALE_PAGE_SIZE; i++) {
			pre_scale_buf =
				(phys_addr_t)rga_service.pre_scale_buf[i];
			if (pre_scale_buf)
				free_page((unsigned long)
					  phys_to_virt(pre_scale_buf));
		}
		kfree(rga_service.pre_scale_buf);
	}
	kfree(rga_mmu_buf.buf_virtual);
	kfree(rga_mmu_buf.pages);

	platform_driver_unregister(&rga_driver);
}
2400*4882a593Smuzhiyun
2401*4882a593Smuzhiyun #if RGA_TEST_CASE
2402*4882a593Smuzhiyun
2403*4882a593Smuzhiyun extern struct fb_info * rk_get_fb(int fb_id);
2404*4882a593Smuzhiyun EXPORT_SYMBOL(rk_get_fb);
2405*4882a593Smuzhiyun
2406*4882a593Smuzhiyun extern void rk_direct_fb_show(struct fb_info * fbi);
2407*4882a593Smuzhiyun EXPORT_SYMBOL(rk_direct_fb_show);
2408*4882a593Smuzhiyun
2409*4882a593Smuzhiyun unsigned int src_buf[1920*1080];
2410*4882a593Smuzhiyun unsigned int dst_buf[1920*1080];
2411*4882a593Smuzhiyun //unsigned int tmp_buf[1920*1080 * 2];
2412*4882a593Smuzhiyun
rga_test_0(void)2413*4882a593Smuzhiyun void rga_test_0(void)
2414*4882a593Smuzhiyun {
2415*4882a593Smuzhiyun struct rga_req req;
2416*4882a593Smuzhiyun rga_session session;
2417*4882a593Smuzhiyun unsigned int *src, *dst;
2418*4882a593Smuzhiyun uint32_t i, j;
2419*4882a593Smuzhiyun uint8_t *p;
2420*4882a593Smuzhiyun uint8_t t;
2421*4882a593Smuzhiyun uint32_t *dst0, *dst1, *dst2;
2422*4882a593Smuzhiyun
2423*4882a593Smuzhiyun struct fb_info *fb;
2424*4882a593Smuzhiyun
2425*4882a593Smuzhiyun session.pid = current->pid;
2426*4882a593Smuzhiyun INIT_LIST_HEAD(&session.waiting);
2427*4882a593Smuzhiyun INIT_LIST_HEAD(&session.running);
2428*4882a593Smuzhiyun INIT_LIST_HEAD(&session.list_session);
2429*4882a593Smuzhiyun init_waitqueue_head(&session.wait);
2430*4882a593Smuzhiyun /* no need to protect */
2431*4882a593Smuzhiyun list_add_tail(&session.list_session, &rga_service.session);
2432*4882a593Smuzhiyun atomic_set(&session.task_running, 0);
2433*4882a593Smuzhiyun atomic_set(&session.num_done, 0);
2434*4882a593Smuzhiyun //file->private_data = (void *)session;
2435*4882a593Smuzhiyun
2436*4882a593Smuzhiyun fb = rk_get_fb(0);
2437*4882a593Smuzhiyun
2438*4882a593Smuzhiyun memset(&req, 0, sizeof(struct rga_req));
2439*4882a593Smuzhiyun src = src_buf;
2440*4882a593Smuzhiyun dst = dst_buf;
2441*4882a593Smuzhiyun
2442*4882a593Smuzhiyun memset(src_buf, 0x80, 1024*600*4);
2443*4882a593Smuzhiyun
2444*4882a593Smuzhiyun dmac_flush_range(&src_buf[0], &src_buf[1024*600]);
2445*4882a593Smuzhiyun outer_flush_range(virt_to_phys(&src_buf[0]),virt_to_phys(&src_buf[1024*600]));

#if 0
	memset(src_buf, 0x80, 800 * 480 * 4);
	memset(dst_buf, 0xcc, 800 * 480 * 4);

	dmac_flush_range(&dst_buf[0], &dst_buf[800 * 480]);
	outer_flush_range(virt_to_phys(&dst_buf[0]), virt_to_phys(&dst_buf[800 * 480]));
#endif

	dst0 = &dst_buf[0];
	//dst1 = &dst_buf[1280*800*4];
	//dst2 = &dst_buf[1280*800*4*2];

	i = j = 0;

	printk("\n********************************\n");
	printk("************ RGA_TEST ************\n");
	printk("********************************\n\n");

	req.src.act_w = 1024;
	req.src.act_h = 600;

	req.src.vir_w = 1024;
	req.src.vir_h = 600;
	req.src.yrgb_addr = (uint32_t)virt_to_phys(src);
	req.src.uv_addr = (uint32_t)(req.src.yrgb_addr + 1080 * 1920);
	req.src.v_addr = (uint32_t)virt_to_phys(src);
	req.src.format = RK_FORMAT_RGBA_8888;

	req.dst.act_w = 600;
	req.dst.act_h = 352;

	req.dst.vir_w = 1280;
	req.dst.vir_h = 800;
	req.dst.x_offset = 600;
	req.dst.y_offset = 0;

	dst = dst0;

	req.dst.yrgb_addr = ((uint32_t)virt_to_phys(dst));

	//req.dst.format = RK_FORMAT_RGB_565;

	/* Clip window coordinates are inclusive: the full 1280x800 destination. */
	req.clip.xmin = 0;
	req.clip.xmax = 1279;
	req.clip.ymin = 0;
	req.clip.ymax = 799;

	//req.render_mode = color_fill_mode;
	//req.fg_color = 0x80ffffff;

	req.rotate_mode = 1;
	//req.scale_mode = 2;

	//req.alpha_rop_flag = 0;
	//req.alpha_rop_mode = 0x19;
	//req.PD_mode = 3;

	/* sina/cosa are Q16.16 fixed point: sin = 1.0, cos = 0.0, i.e. 90 degrees. */
	req.sina = 65536;
	req.cosa = 0;
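
	/*
	 * For an arbitrary angle the caller (typically userspace, where
	 * floating point is usable) would load the same Q16.16 fields; an
	 * illustrative helper, not part of this driver:
	 *
	 *	req.sina = (int32_t)(sin(rad) * 65536.0);
	 *	req.cosa = (int32_t)(cos(rad) * 65536.0);
	 */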

	//req.mmu_info.mmu_flag = 0x21;
	//req.mmu_info.mmu_en = 1;

	//printk("src = %.8x\n", req.src.yrgb_addr);
	//printk("src = %.8x\n", req.src.uv_addr);
	//printk("dst = %.8x\n", req.dst.yrgb_addr);

	rga_blit_sync(&session, &req);

#if 1
	/* Point fb0 at the blit result: 1280x800, 32 bpp, red in the low byte. */
	fb->var.bits_per_pixel = 32;

	fb->var.xres = 1280;
	fb->var.yres = 800;

	fb->var.red.length = 8;
	fb->var.red.offset = 0;
	fb->var.red.msb_right = 0;

	fb->var.green.length = 8;
	fb->var.green.offset = 8;
	fb->var.green.msb_right = 0;

	fb->var.blue.length = 8;
	fb->var.blue.offset = 16;
	fb->var.blue.msb_right = 0;

	fb->var.transp.length = 8;
	fb->var.transp.offset = 24;
	fb->var.transp.msb_right = 0;

	fb->var.nonstd &= ~0xff;
	fb->var.nonstd |= 1;

	fb->fix.smem_start = virt_to_phys(dst);

	rk_direct_fb_show(fb);
#endif
}

#endif
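
/*
 * When RGA_TEST_CASE is enabled, rga_test_0() still needs a caller; a
 * minimal call site (illustrative; the hook-up point is an assumption)
 * would sit at the end of the init path:
 *
 *	#if RGA_TEST_CASE
 *		rga_test_0();
 *	#endif
 */
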
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
fs_initcall(rga_init);
#else
module_init(rga_init);
#endif
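/*
 * For built-in code, fs_initcall runs earlier than the device-level
 * initcall that module_init maps to, presumably so the RGA is ready
 * before early framebuffer/graphics clients probe.
 */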
module_exit(rga_exit);

/* Module information */
MODULE_AUTHOR("zsq@rock-chips.com");
MODULE_DESCRIPTION("Driver for the Rockchip RGA device");
MODULE_LICENSE("GPL");