1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun
3*4882a593Smuzhiyun #define pr_fmt(fmt) "rga2_mmu: " fmt
4*4882a593Smuzhiyun #include <linux/version.h>
5*4882a593Smuzhiyun #include <linux/init.h>
6*4882a593Smuzhiyun #include <linux/module.h>
7*4882a593Smuzhiyun #include <linux/fs.h>
8*4882a593Smuzhiyun #include <linux/sched.h>
9*4882a593Smuzhiyun #include <linux/signal.h>
10*4882a593Smuzhiyun #include <linux/pagemap.h>
11*4882a593Smuzhiyun #include <linux/seq_file.h>
12*4882a593Smuzhiyun #include <linux/mm.h>
13*4882a593Smuzhiyun #include <linux/mman.h>
14*4882a593Smuzhiyun #include <linux/sched.h>
15*4882a593Smuzhiyun #include <linux/slab.h>
16*4882a593Smuzhiyun #include <linux/memory.h>
17*4882a593Smuzhiyun #include <linux/dma-mapping.h>
18*4882a593Smuzhiyun #include <linux/scatterlist.h>
19*4882a593Smuzhiyun #include <asm/memory.h>
20*4882a593Smuzhiyun #include <asm/atomic.h>
21*4882a593Smuzhiyun #include <asm/cacheflush.h>
22*4882a593Smuzhiyun #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
23*4882a593Smuzhiyun #include <linux/rockchip_ion.h>
24*4882a593Smuzhiyun #endif
25*4882a593Smuzhiyun #include "rga2_mmu_info.h"
26*4882a593Smuzhiyun #include "rga2_debugger.h"
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun extern struct rga2_service_info rga2_service;
29*4882a593Smuzhiyun extern struct rga2_mmu_buf_t rga2_mmu_buf;
30*4882a593Smuzhiyun extern struct rga2_drvdata_t *rga2_drvdata;
31*4882a593Smuzhiyun
32*4882a593Smuzhiyun //extern int mmu_buff_temp[1024];
33*4882a593Smuzhiyun
34*4882a593Smuzhiyun #define KERNEL_SPACE_VALID 0xc0000000
35*4882a593Smuzhiyun
36*4882a593Smuzhiyun #define V7_VATOPA_SUCESS_MASK (0x1)
37*4882a593Smuzhiyun #define V7_VATOPA_GET_PADDR(X) (X & 0xFFFFF000)
38*4882a593Smuzhiyun #define V7_VATOPA_GET_INER(X) ((X>>4) & 7)
39*4882a593Smuzhiyun #define V7_VATOPA_GET_OUTER(X) ((X>>2) & 3)
40*4882a593Smuzhiyun #define V7_VATOPA_GET_SH(X) ((X>>7) & 1)
41*4882a593Smuzhiyun #define V7_VATOPA_GET_NS(X) ((X>>9) & 1)
42*4882a593Smuzhiyun #define V7_VATOPA_GET_SS(X) ((X>>1) & 1)
43*4882a593Smuzhiyun
44*4882a593Smuzhiyun void rga2_dma_flush_range(void *pstart, void *pend)
45*4882a593Smuzhiyun {
46*4882a593Smuzhiyun dma_sync_single_for_device(rga2_drvdata->dev, virt_to_phys(pstart), pend - pstart, DMA_TO_DEVICE);
47*4882a593Smuzhiyun }
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun dma_addr_t rga2_dma_flush_page(struct page *page, int map)
50*4882a593Smuzhiyun {
51*4882a593Smuzhiyun dma_addr_t paddr;
52*4882a593Smuzhiyun
53*4882a593Smuzhiyun /*
54*4882a593Smuzhiyun * Use dma_map_page() so that the returned physical address
55*4882a593Smuzhiyun * cannot exceed the addressing range of the DMA device.
56*4882a593Smuzhiyun */
57*4882a593Smuzhiyun if (map & MMU_MAP_MASK) {
58*4882a593Smuzhiyun switch (map) {
59*4882a593Smuzhiyun case MMU_MAP_CLEAN:
60*4882a593Smuzhiyun paddr = dma_map_page(rga2_drvdata->dev, page, 0,
61*4882a593Smuzhiyun PAGE_SIZE, DMA_TO_DEVICE);
62*4882a593Smuzhiyun break;
63*4882a593Smuzhiyun case MMU_MAP_INVALID:
64*4882a593Smuzhiyun paddr = dma_map_page(rga2_drvdata->dev, page, 0,
65*4882a593Smuzhiyun PAGE_SIZE, DMA_FROM_DEVICE);
66*4882a593Smuzhiyun break;
67*4882a593Smuzhiyun case MMU_MAP_CLEAN | MMU_MAP_INVALID:
68*4882a593Smuzhiyun paddr = dma_map_page(rga2_drvdata->dev, page, 0,
69*4882a593Smuzhiyun PAGE_SIZE, DMA_BIDIRECTIONAL);
70*4882a593Smuzhiyun break;
71*4882a593Smuzhiyun default:
72*4882a593Smuzhiyun paddr = 0;
73*4882a593Smuzhiyun pr_err("unknown map cmd 0x%x\n", map);
74*4882a593Smuzhiyun break;
75*4882a593Smuzhiyun }
76*4882a593Smuzhiyun
77*4882a593Smuzhiyun return paddr;
78*4882a593Smuzhiyun } else if (map & MMU_UNMAP_MASK) {
79*4882a593Smuzhiyun paddr = page_to_phys(page);
80*4882a593Smuzhiyun
81*4882a593Smuzhiyun switch (map) {
82*4882a593Smuzhiyun case MMU_UNMAP_CLEAN:
83*4882a593Smuzhiyun dma_unmap_page(rga2_drvdata->dev, paddr,
84*4882a593Smuzhiyun PAGE_SIZE, DMA_TO_DEVICE);
85*4882a593Smuzhiyun break;
86*4882a593Smuzhiyun case MMU_UNMAP_INVALID:
87*4882a593Smuzhiyun dma_unmap_page(rga2_drvdata->dev, paddr,
88*4882a593Smuzhiyun PAGE_SIZE, DMA_FROM_DEVICE);
89*4882a593Smuzhiyun break;
90*4882a593Smuzhiyun case MMU_UNMAP_CLEAN | MMU_UNMAP_INVALID:
91*4882a593Smuzhiyun dma_unmap_page(rga2_drvdata->dev, paddr,
92*4882a593Smuzhiyun PAGE_SIZE, DMA_BIDIRECTIONAL);
93*4882a593Smuzhiyun break;
94*4882a593Smuzhiyun default:
95*4882a593Smuzhiyun pr_err("unknown map cmd 0x%x\n", map);
96*4882a593Smuzhiyun break;
97*4882a593Smuzhiyun }
98*4882a593Smuzhiyun
99*4882a593Smuzhiyun return paddr;
100*4882a593Smuzhiyun }
101*4882a593Smuzhiyun
102*4882a593Smuzhiyun pr_err("RGA2 failed to flush page, map= %x\n", map);
103*4882a593Smuzhiyun return 0;
104*4882a593Smuzhiyun }
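/*
 * Usage sketch (illustrative): callers pick the DMA direction by combining
 * the MMU_MAP_* / MMU_UNMAP_* flags handled above, e.g. for a page the
 * hardware both reads and writes:
 *
 *   dma_addr_t pa = rga2_dma_flush_page(page, MMU_MAP_CLEAN | MMU_MAP_INVALID);
 *   ... run the RGA2 job using pa ...
 *   rga2_dma_flush_page(page, MMU_UNMAP_CLEAN | MMU_UNMAP_INVALID);
 *
 * i.e. a DMA_BIDIRECTIONAL map followed by the matching unmap.
 */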
105*4882a593Smuzhiyun
106*4882a593Smuzhiyun #if 0
107*4882a593Smuzhiyun static unsigned int armv7_va_to_pa(unsigned int v_addr)
108*4882a593Smuzhiyun {
109*4882a593Smuzhiyun unsigned int p_addr;
110*4882a593Smuzhiyun __asm__ volatile ( "mcr p15, 0, %1, c7, c8, 0\n"
111*4882a593Smuzhiyun "isb\n"
112*4882a593Smuzhiyun "dsb\n"
113*4882a593Smuzhiyun "mrc p15, 0, %0, c7, c4, 0\n"
114*4882a593Smuzhiyun : "=r" (p_addr)
115*4882a593Smuzhiyun : "r" (v_addr)
116*4882a593Smuzhiyun : "cc");
117*4882a593Smuzhiyun
118*4882a593Smuzhiyun if (p_addr & V7_VATOPA_SUCESS_MASK)
119*4882a593Smuzhiyun return 0xFFFFFFFF;
120*4882a593Smuzhiyun else
121*4882a593Smuzhiyun return (V7_VATOPA_GET_SS(p_addr) ? 0xFFFFFFFF : V7_VATOPA_GET_PADDR(p_addr));
122*4882a593Smuzhiyun }
123*4882a593Smuzhiyun #endif
124*4882a593Smuzhiyun
125*4882a593Smuzhiyun static bool rga2_is_yuv422p_format(u32 format)
126*4882a593Smuzhiyun {
127*4882a593Smuzhiyun bool ret = false;
128*4882a593Smuzhiyun
129*4882a593Smuzhiyun switch (format) {
130*4882a593Smuzhiyun case RGA2_FORMAT_YCbCr_422_P:
131*4882a593Smuzhiyun case RGA2_FORMAT_YCrCb_422_P:
132*4882a593Smuzhiyun ret = true;
133*4882a593Smuzhiyun break;
134*4882a593Smuzhiyun }
135*4882a593Smuzhiyun return ret;
136*4882a593Smuzhiyun }
137*4882a593Smuzhiyun
138*4882a593Smuzhiyun #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
139*4882a593Smuzhiyun static int rga2_get_format_bits(u32 format)
140*4882a593Smuzhiyun {
141*4882a593Smuzhiyun int bits = 0;
142*4882a593Smuzhiyun
143*4882a593Smuzhiyun switch (format) {
144*4882a593Smuzhiyun case RGA2_FORMAT_RGBA_8888:
145*4882a593Smuzhiyun case RGA2_FORMAT_RGBX_8888:
146*4882a593Smuzhiyun case RGA2_FORMAT_BGRA_8888:
147*4882a593Smuzhiyun case RGA2_FORMAT_BGRX_8888:
148*4882a593Smuzhiyun case RGA2_FORMAT_ARGB_8888:
149*4882a593Smuzhiyun case RGA2_FORMAT_XRGB_8888:
150*4882a593Smuzhiyun case RGA2_FORMAT_ABGR_8888:
151*4882a593Smuzhiyun case RGA2_FORMAT_XBGR_8888:
152*4882a593Smuzhiyun bits = 32;
153*4882a593Smuzhiyun break;
154*4882a593Smuzhiyun case RGA2_FORMAT_RGB_888:
155*4882a593Smuzhiyun case RGA2_FORMAT_BGR_888:
156*4882a593Smuzhiyun bits = 24;
157*4882a593Smuzhiyun break;
158*4882a593Smuzhiyun case RGA2_FORMAT_RGB_565:
159*4882a593Smuzhiyun case RGA2_FORMAT_RGBA_5551:
160*4882a593Smuzhiyun case RGA2_FORMAT_RGBA_4444:
161*4882a593Smuzhiyun case RGA2_FORMAT_BGR_565:
162*4882a593Smuzhiyun case RGA2_FORMAT_YCbCr_422_SP:
163*4882a593Smuzhiyun case RGA2_FORMAT_YCbCr_422_P:
164*4882a593Smuzhiyun case RGA2_FORMAT_YCrCb_422_SP:
165*4882a593Smuzhiyun case RGA2_FORMAT_YCrCb_422_P:
166*4882a593Smuzhiyun case RGA2_FORMAT_BGRA_5551:
167*4882a593Smuzhiyun case RGA2_FORMAT_BGRA_4444:
168*4882a593Smuzhiyun case RGA2_FORMAT_ARGB_5551:
169*4882a593Smuzhiyun case RGA2_FORMAT_ARGB_4444:
170*4882a593Smuzhiyun case RGA2_FORMAT_ABGR_5551:
171*4882a593Smuzhiyun case RGA2_FORMAT_ABGR_4444:
172*4882a593Smuzhiyun bits = 16;
173*4882a593Smuzhiyun break;
174*4882a593Smuzhiyun case RGA2_FORMAT_YCbCr_420_SP:
175*4882a593Smuzhiyun case RGA2_FORMAT_YCbCr_420_P:
176*4882a593Smuzhiyun case RGA2_FORMAT_YCrCb_420_SP:
177*4882a593Smuzhiyun case RGA2_FORMAT_YCrCb_420_P:
178*4882a593Smuzhiyun bits = 12;
179*4882a593Smuzhiyun break;
180*4882a593Smuzhiyun case RGA2_FORMAT_YCbCr_420_SP_10B:
181*4882a593Smuzhiyun case RGA2_FORMAT_YCrCb_420_SP_10B:
182*4882a593Smuzhiyun case RGA2_FORMAT_YCbCr_422_SP_10B:
183*4882a593Smuzhiyun case RGA2_FORMAT_YCrCb_422_SP_10B:
184*4882a593Smuzhiyun bits = 15;
185*4882a593Smuzhiyun break;
186*4882a593Smuzhiyun default:
187*4882a593Smuzhiyun pr_err("unknown format [%d]\n", format);
188*4882a593Smuzhiyun return -1;
189*4882a593Smuzhiyun }
190*4882a593Smuzhiyun
191*4882a593Smuzhiyun return bits;
192*4882a593Smuzhiyun }
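/*
 * Worked example (illustrative): a 1280x720 RGBA_8888 surface has
 * bits = 32, i.e. 1280 * 720 * 32 / 8 = 3686400 bytes = exactly 900
 * 4KiB pages with no tail remainder, while a 1280x720 YCbCr_420_SP
 * surface (bits = 12) is 1382400 bytes = 337 full pages plus a
 * 2048-byte tail; the check below reads the last word of that tail.
 */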
193*4882a593Smuzhiyun static int rga2_user_memory_check(struct page **pages, u32 w, u32 h, u32 format, int flag)
194*4882a593Smuzhiyun {
195*4882a593Smuzhiyun int bits;
196*4882a593Smuzhiyun void *vaddr = NULL;
197*4882a593Smuzhiyun int taipage_num;
198*4882a593Smuzhiyun int taidata_num;
199*4882a593Smuzhiyun int *tai_vaddr = NULL;
200*4882a593Smuzhiyun
201*4882a593Smuzhiyun bits = rga2_get_format_bits(format);
202*4882a593Smuzhiyun if (bits < 0)
203*4882a593Smuzhiyun return -1;
204*4882a593Smuzhiyun
205*4882a593Smuzhiyun taipage_num = w * h * bits / 8 / (1024 * 4);
206*4882a593Smuzhiyun taidata_num = w * h * bits / 8 % (1024 * 4);
207*4882a593Smuzhiyun if (taidata_num == 0) {
208*4882a593Smuzhiyun vaddr = kmap(pages[taipage_num - 1]);
209*4882a593Smuzhiyun tai_vaddr = (int *)vaddr + 1023;
210*4882a593Smuzhiyun } else {
211*4882a593Smuzhiyun vaddr = kmap(pages[taipage_num]);
212*4882a593Smuzhiyun tai_vaddr = (int *)vaddr + taidata_num / 4 - 1;
213*4882a593Smuzhiyun }
214*4882a593Smuzhiyun
215*4882a593Smuzhiyun if (flag == 1) {
216*4882a593Smuzhiyun pr_info("src user memory check\n");
217*4882a593Smuzhiyun pr_info("tai data is %d\n", *tai_vaddr);
218*4882a593Smuzhiyun } else {
219*4882a593Smuzhiyun pr_info("dst user memory check\n");
220*4882a593Smuzhiyun pr_info("tai data is %d\n", *tai_vaddr);
221*4882a593Smuzhiyun }
222*4882a593Smuzhiyun
223*4882a593Smuzhiyun if (taidata_num == 0)
224*4882a593Smuzhiyun kunmap(pages[taipage_num - 1]);
225*4882a593Smuzhiyun else
226*4882a593Smuzhiyun kunmap(pages[taipage_num]);
227*4882a593Smuzhiyun
228*4882a593Smuzhiyun return 0;
229*4882a593Smuzhiyun }
230*4882a593Smuzhiyun
231*4882a593Smuzhiyun static int rga2_virtual_memory_check(void *vaddr, u32 w, u32 h, u32 format, int fd)
232*4882a593Smuzhiyun {
233*4882a593Smuzhiyun int bits = 32;
234*4882a593Smuzhiyun int temp_data = 0;
235*4882a593Smuzhiyun void *one_line = NULL;
236*4882a593Smuzhiyun
237*4882a593Smuzhiyun bits = rga2_get_format_bits(format);
238*4882a593Smuzhiyun if (bits < 0)
239*4882a593Smuzhiyun return -1;
240*4882a593Smuzhiyun
241*4882a593Smuzhiyun one_line = kzalloc(w * 4, GFP_KERNEL);
242*4882a593Smuzhiyun if (!one_line) {
243*4882a593Smuzhiyun ERR("kzalloc fail %s[%d]\n", __func__, __LINE__);
244*4882a593Smuzhiyun return 0;
245*4882a593Smuzhiyun }
246*4882a593Smuzhiyun
247*4882a593Smuzhiyun temp_data = w * (h - 1) * bits >> 3;
248*4882a593Smuzhiyun if (fd > 0) {
249*4882a593Smuzhiyun INFO("vaddr is%p, bits is %d, fd check\n", vaddr, bits);
250*4882a593Smuzhiyun memcpy(one_line, (char *)vaddr + temp_data, w * bits >> 3);
251*4882a593Smuzhiyun INFO("fd check ok\n");
252*4882a593Smuzhiyun } else {
253*4882a593Smuzhiyun INFO("vir addr memory check.\n");
254*4882a593Smuzhiyun memcpy((void *)((char *)vaddr + temp_data), one_line,
255*4882a593Smuzhiyun w * bits >> 3);
256*4882a593Smuzhiyun INFO("vir addr check ok.\n");
257*4882a593Smuzhiyun }
258*4882a593Smuzhiyun
259*4882a593Smuzhiyun kfree(one_line);
260*4882a593Smuzhiyun return 0;
261*4882a593Smuzhiyun }
262*4882a593Smuzhiyun
263*4882a593Smuzhiyun static int rga2_dma_memory_check(struct rga_dma_buffer_t *buffer,
264*4882a593Smuzhiyun struct rga_img_info_t *img)
265*4882a593Smuzhiyun {
266*4882a593Smuzhiyun int ret = 0;
267*4882a593Smuzhiyun void *vaddr;
268*4882a593Smuzhiyun struct dma_buf *dma_buffer;
269*4882a593Smuzhiyun
270*4882a593Smuzhiyun dma_buffer = buffer->dma_buf;
271*4882a593Smuzhiyun
272*4882a593Smuzhiyun if (!IS_ERR_OR_NULL(dma_buffer)) {
273*4882a593Smuzhiyun vaddr = dma_buf_vmap(dma_buffer);
274*4882a593Smuzhiyun if (vaddr) {
275*4882a593Smuzhiyun ret = rga2_virtual_memory_check(vaddr, img->vir_w, img->vir_h,
276*4882a593Smuzhiyun img->format, img->yrgb_addr);
277*4882a593Smuzhiyun } else {
278*4882a593Smuzhiyun pr_err("can't vmap the dma buffer!\n");
279*4882a593Smuzhiyun return -EINVAL;
280*4882a593Smuzhiyun }
281*4882a593Smuzhiyun
282*4882a593Smuzhiyun dma_buf_vunmap(dma_buffer, vaddr);
283*4882a593Smuzhiyun }
284*4882a593Smuzhiyun
285*4882a593Smuzhiyun return ret;
286*4882a593Smuzhiyun }
287*4882a593Smuzhiyun #endif
288*4882a593Smuzhiyun
289*4882a593Smuzhiyun #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
290*4882a593Smuzhiyun static int rga2_map_dma_buffer(int fd,
291*4882a593Smuzhiyun struct rga_dma_buffer_t *rga_dma_buffer,
292*4882a593Smuzhiyun enum dma_data_direction dir)
293*4882a593Smuzhiyun {
294*4882a593Smuzhiyun struct device *rga_dev = NULL;
295*4882a593Smuzhiyun struct dma_buf *dma_buf = NULL;
296*4882a593Smuzhiyun struct dma_buf_attachment *attach = NULL;
297*4882a593Smuzhiyun struct sg_table *sgt = NULL;
298*4882a593Smuzhiyun int ret = 0;
299*4882a593Smuzhiyun
300*4882a593Smuzhiyun rga_dev = rga2_drvdata->dev;
301*4882a593Smuzhiyun
302*4882a593Smuzhiyun dma_buf = dma_buf_get(fd);
303*4882a593Smuzhiyun if (IS_ERR(dma_buf)) {
304*4882a593Smuzhiyun ret = -EINVAL;
305*4882a593Smuzhiyun pr_err("dma_buf_get fail fd[%d]\n", fd);
306*4882a593Smuzhiyun return ret;
307*4882a593Smuzhiyun }
308*4882a593Smuzhiyun
309*4882a593Smuzhiyun attach = dma_buf_attach(dma_buf, rga_dev);
310*4882a593Smuzhiyun if (IS_ERR(attach)) {
311*4882a593Smuzhiyun ret = -EINVAL;
312*4882a593Smuzhiyun pr_err("Failed to attach dma_buf\n");
313*4882a593Smuzhiyun goto err_get_attach;
314*4882a593Smuzhiyun }
315*4882a593Smuzhiyun
316*4882a593Smuzhiyun sgt = dma_buf_map_attachment(attach, dir);
317*4882a593Smuzhiyun if (IS_ERR(sgt)) {
318*4882a593Smuzhiyun ret = -EINVAL;
319*4882a593Smuzhiyun pr_err("Failed to map src attachment\n");
320*4882a593Smuzhiyun goto err_get_sgt;
321*4882a593Smuzhiyun }
322*4882a593Smuzhiyun
323*4882a593Smuzhiyun rga_dma_buffer->dma_buf = dma_buf;
324*4882a593Smuzhiyun rga_dma_buffer->attach = attach;
325*4882a593Smuzhiyun rga_dma_buffer->sgt = sgt;
326*4882a593Smuzhiyun rga_dma_buffer->size = sg_dma_len(sgt->sgl);
327*4882a593Smuzhiyun rga_dma_buffer->dir = dir;
328*4882a593Smuzhiyun
329*4882a593Smuzhiyun return ret;
330*4882a593Smuzhiyun
331*4882a593Smuzhiyun err_get_sgt:
332*4882a593Smuzhiyun if (attach)
333*4882a593Smuzhiyun dma_buf_detach(dma_buf, attach);
334*4882a593Smuzhiyun err_get_attach:
335*4882a593Smuzhiyun if (dma_buf)
336*4882a593Smuzhiyun dma_buf_put(dma_buf);
337*4882a593Smuzhiyun
338*4882a593Smuzhiyun return ret;
339*4882a593Smuzhiyun }
340*4882a593Smuzhiyun
341*4882a593Smuzhiyun static void rga2_unmap_dma_buffer(struct rga_dma_buffer_t *rga_dma_buffer)
342*4882a593Smuzhiyun {
343*4882a593Smuzhiyun if (rga_dma_buffer->attach && rga_dma_buffer->sgt)
344*4882a593Smuzhiyun dma_buf_unmap_attachment(rga_dma_buffer->attach,
345*4882a593Smuzhiyun rga_dma_buffer->sgt,
346*4882a593Smuzhiyun rga_dma_buffer->dir);
347*4882a593Smuzhiyun if (rga_dma_buffer->attach) {
348*4882a593Smuzhiyun dma_buf_detach(rga_dma_buffer->dma_buf, rga_dma_buffer->attach);
349*4882a593Smuzhiyun dma_buf_put(rga_dma_buffer->dma_buf);
350*4882a593Smuzhiyun }
351*4882a593Smuzhiyun }
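/*
 * Pairing sketch (assuming the usual dma-buf flow): a zero-initialised
 * rga_dma_buffer_t is mapped once and unmapped once per job, e.g.
 *
 *   struct rga_dma_buffer_t buf = { 0 };
 *
 *   if (rga2_map_dma_buffer(fd, &buf, DMA_BIDIRECTIONAL) == 0) {
 *           ... program the hardware from buf.sgt ...
 *           rga2_unmap_dma_buffer(&buf);
 *   }
 *
 * rga2_unmap_dma_buffer() is also safe on an unmapped (zeroed) buffer,
 * since it only detaches and puts the dma-buf when an attachment exists.
 */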
352*4882a593Smuzhiyun
353*4882a593Smuzhiyun static void rga2_convert_addr(struct rga_img_info_t *img)
354*4882a593Smuzhiyun {
355*4882a593Smuzhiyun /*
356*4882a593Smuzhiyun * If the channel is not using a dma-buf fd, the virtual/physical
357*4882a593Smuzhiyun * address is assigned to the address of the corresponding channel.
358*4882a593Smuzhiyun */
359*4882a593Smuzhiyun img->yrgb_addr = img->uv_addr;
360*4882a593Smuzhiyun img->uv_addr = img->yrgb_addr + (img->vir_w * img->vir_h);
361*4882a593Smuzhiyun if (rga2_is_yuv422p_format(img->format))
362*4882a593Smuzhiyun img->v_addr = img->uv_addr + (img->vir_w * img->vir_h) / 2;
363*4882a593Smuzhiyun else
364*4882a593Smuzhiyun img->v_addr = img->uv_addr + (img->vir_w * img->vir_h) / 4;
365*4882a593Smuzhiyun }
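/*
 * Worked example (illustrative): for a 1920x1080 YCbCr_420_SP image whose
 * mapped base address U arrives in uv_addr, the channel ends up with
 *   yrgb_addr = U
 *   uv_addr   = U + 1920 * 1080            (after the Y plane)
 *   v_addr    = U + 1920 * 1080 * 5 / 4    (unused by semi-planar formats)
 * whereas a YCbCr_422_P image gets v_addr = uv_addr + 1920 * 1080 / 2.
 */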
366*4882a593Smuzhiyun
367*4882a593Smuzhiyun int rga2_get_dma_info(struct rga2_reg *reg, struct rga2_req *req)
368*4882a593Smuzhiyun {
369*4882a593Smuzhiyun uint32_t mmu_flag;
370*4882a593Smuzhiyun int ret;
371*4882a593Smuzhiyun
372*4882a593Smuzhiyun struct rga_dma_buffer_t *buffer_src0, *buffer_src1, *buffer_dst, *buffer_els;
373*4882a593Smuzhiyun struct rga_img_info_t *src0, *src1, *dst, *els;
374*4882a593Smuzhiyun
375*4882a593Smuzhiyun /*
376*4882a593Smuzhiyun * Since the lifetime of rga2_req is too short for the dma-buf to be
377*4882a593Smuzhiyun * released after the task completes, the mapped dma-buf is saved
378*4882a593Smuzhiyun * in rga2_reg instead.
379*4882a593Smuzhiyun */
380*4882a593Smuzhiyun buffer_src0 = &reg->dma_buffer_src0;
381*4882a593Smuzhiyun buffer_src1 = &reg->dma_buffer_src1;
382*4882a593Smuzhiyun buffer_dst = &reg->dma_buffer_dst;
383*4882a593Smuzhiyun buffer_els = &reg->dma_buffer_els;
384*4882a593Smuzhiyun
385*4882a593Smuzhiyun src0 = &req->src;
386*4882a593Smuzhiyun src1 = &req->src1;
387*4882a593Smuzhiyun dst = &req->dst;
388*4882a593Smuzhiyun els = &req->pat;
389*4882a593Smuzhiyun
390*4882a593Smuzhiyun /* src0 channel */
391*4882a593Smuzhiyun mmu_flag = req->mmu_info.src0_mmu_flag;
392*4882a593Smuzhiyun if (unlikely(!mmu_flag && src0->yrgb_addr)) {
393*4882a593Smuzhiyun pr_err("Fix it please enable src0 mmu\n");
394*4882a593Smuzhiyun return -EINVAL;
395*4882a593Smuzhiyun } else if (mmu_flag && src0->yrgb_addr) {
396*4882a593Smuzhiyun ret = rga2_map_dma_buffer(src0->yrgb_addr, buffer_src0, DMA_BIDIRECTIONAL);
397*4882a593Smuzhiyun if (ret < 0) {
398*4882a593Smuzhiyun pr_err("src0: can't map dma-buf\n");
399*4882a593Smuzhiyun return ret;
400*4882a593Smuzhiyun }
401*4882a593Smuzhiyun }
402*4882a593Smuzhiyun #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
403*4882a593Smuzhiyun if (RGA2_CHECK_MODE) {
404*4882a593Smuzhiyun ret = rga2_dma_memory_check(buffer_src0, src0);
405*4882a593Smuzhiyun if (ret < 0) {
406*4882a593Smuzhiyun pr_err("src0 channel check memory error!\n");
407*4882a593Smuzhiyun return ret;
408*4882a593Smuzhiyun }
409*4882a593Smuzhiyun }
410*4882a593Smuzhiyun #endif
411*4882a593Smuzhiyun rga2_convert_addr(src0);
412*4882a593Smuzhiyun
413*4882a593Smuzhiyun /* src1 channel */
414*4882a593Smuzhiyun mmu_flag = req->mmu_info.src1_mmu_flag;
415*4882a593Smuzhiyun if (unlikely(!mmu_flag && src1->yrgb_addr)) {
416*4882a593Smuzhiyun pr_err("Fix it please enable src1 mmu\n");
417*4882a593Smuzhiyun ret = -EINVAL;
418*4882a593Smuzhiyun goto err_src1_channel;
419*4882a593Smuzhiyun } else if (mmu_flag && src1->yrgb_addr) {
420*4882a593Smuzhiyun ret = rga2_map_dma_buffer(src1->yrgb_addr, buffer_src1, DMA_BIDIRECTIONAL);
421*4882a593Smuzhiyun if (ret < 0) {
422*4882a593Smuzhiyun pr_err("src1: can't map dma-buf\n");
423*4882a593Smuzhiyun goto err_src1_channel;
424*4882a593Smuzhiyun }
425*4882a593Smuzhiyun }
426*4882a593Smuzhiyun #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
427*4882a593Smuzhiyun if (RGA2_CHECK_MODE) {
428*4882a593Smuzhiyun ret = rga2_dma_memory_check(buffer_src1, src1);
429*4882a593Smuzhiyun if (ret < 0) {
430*4882a593Smuzhiyun pr_err("src1 channel check memory error!\n");
431*4882a593Smuzhiyun goto err_src1_channel;
432*4882a593Smuzhiyun }
433*4882a593Smuzhiyun }
434*4882a593Smuzhiyun #endif
435*4882a593Smuzhiyun rga2_convert_addr(src1);
436*4882a593Smuzhiyun
437*4882a593Smuzhiyun /* dst channel */
438*4882a593Smuzhiyun mmu_flag = req->mmu_info.dst_mmu_flag;
439*4882a593Smuzhiyun if (unlikely(!mmu_flag && dst->yrgb_addr)) {
440*4882a593Smuzhiyun pr_err("Fix it please enable dst mmu\n");
441*4882a593Smuzhiyun ret = -EINVAL;
442*4882a593Smuzhiyun goto err_dst_channel;
443*4882a593Smuzhiyun } else if (mmu_flag && dst->yrgb_addr) {
444*4882a593Smuzhiyun ret = rga2_map_dma_buffer(dst->yrgb_addr, buffer_dst, DMA_BIDIRECTIONAL);
445*4882a593Smuzhiyun if (ret < 0) {
446*4882a593Smuzhiyun pr_err("dst: can't map dma-buf\n");
447*4882a593Smuzhiyun goto err_dst_channel;
448*4882a593Smuzhiyun }
449*4882a593Smuzhiyun }
450*4882a593Smuzhiyun #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
451*4882a593Smuzhiyun if (RGA2_CHECK_MODE) {
452*4882a593Smuzhiyun ret = rga2_dma_memory_check(buffer_dst, dst);
453*4882a593Smuzhiyun if (ret < 0) {
454*4882a593Smuzhiyun pr_err("dst channel check memory error!\n");
455*4882a593Smuzhiyun goto err_dst_channel;
456*4882a593Smuzhiyun }
457*4882a593Smuzhiyun }
458*4882a593Smuzhiyun #endif
459*4882a593Smuzhiyun rga2_convert_addr(dst);
460*4882a593Smuzhiyun
461*4882a593Smuzhiyun /* els channel */
462*4882a593Smuzhiyun mmu_flag = req->mmu_info.els_mmu_flag;
463*4882a593Smuzhiyun if (unlikely(!mmu_flag && els->yrgb_addr)) {
464*4882a593Smuzhiyun pr_err("Fix it please enable els mmu\n");
465*4882a593Smuzhiyun ret = -EINVAL;
466*4882a593Smuzhiyun goto err_els_channel;
467*4882a593Smuzhiyun } else if (mmu_flag && els->yrgb_addr) {
468*4882a593Smuzhiyun ret = rga2_map_dma_buffer(els->yrgb_addr, buffer_els, DMA_BIDIRECTIONAL);
469*4882a593Smuzhiyun if (ret < 0) {
470*4882a593Smuzhiyun pr_err("els: can't map dma-buf\n");
471*4882a593Smuzhiyun goto err_els_channel;
472*4882a593Smuzhiyun }
473*4882a593Smuzhiyun }
474*4882a593Smuzhiyun #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
475*4882a593Smuzhiyun if (RGA2_CHECK_MODE) {
476*4882a593Smuzhiyun ret = rga2_dma_memory_check(buffer_els, els);
477*4882a593Smuzhiyun if (ret < 0) {
478*4882a593Smuzhiyun pr_err("els channel check memory error!\n");
479*4882a593Smuzhiyun goto err_els_channel;
480*4882a593Smuzhiyun }
481*4882a593Smuzhiyun }
482*4882a593Smuzhiyun #endif
483*4882a593Smuzhiyun rga2_convert_addr(els);
484*4882a593Smuzhiyun
485*4882a593Smuzhiyun return 0;
486*4882a593Smuzhiyun
487*4882a593Smuzhiyun err_els_channel:
488*4882a593Smuzhiyun rga2_unmap_dma_buffer(buffer_dst);
489*4882a593Smuzhiyun err_dst_channel:
490*4882a593Smuzhiyun rga2_unmap_dma_buffer(buffer_src1);
491*4882a593Smuzhiyun err_src1_channel:
492*4882a593Smuzhiyun rga2_unmap_dma_buffer(buffer_src0);
493*4882a593Smuzhiyun
494*4882a593Smuzhiyun return ret;
495*4882a593Smuzhiyun }
496*4882a593Smuzhiyun
497*4882a593Smuzhiyun void rga2_put_dma_info(struct rga2_reg *reg)
498*4882a593Smuzhiyun {
499*4882a593Smuzhiyun rga2_unmap_dma_buffer(&reg->dma_buffer_src0);
500*4882a593Smuzhiyun rga2_unmap_dma_buffer(&reg->dma_buffer_src1);
501*4882a593Smuzhiyun rga2_unmap_dma_buffer(&reg->dma_buffer_dst);
502*4882a593Smuzhiyun rga2_unmap_dma_buffer(&reg->dma_buffer_els);
503*4882a593Smuzhiyun }
504*4882a593Smuzhiyun #else
505*4882a593Smuzhiyun static int rga2_get_dma_info(struct rga2_reg *reg, struct rga2_req *req)
506*4882a593Smuzhiyun {
507*4882a593Smuzhiyun struct ion_handle *hdl;
508*4882a593Smuzhiyun ion_phys_addr_t phy_addr;
509*4882a593Smuzhiyun size_t len;
510*4882a593Smuzhiyun int ret;
511*4882a593Smuzhiyun u32 src_vir_w, dst_vir_w;
512*4882a593Smuzhiyun void *vaddr = NULL;
513*4882a593Smuzhiyun struct rga_dma_buffer_t *buffer_src0, *buffer_src1, *buffer_dst, *buffer_els;
514*4882a593Smuzhiyun
515*4882a593Smuzhiyun src_vir_w = req->src.vir_w;
516*4882a593Smuzhiyun dst_vir_w = req->dst.vir_w;
517*4882a593Smuzhiyun
518*4882a593Smuzhiyun buffer_src0 = &reg->dma_buffer_src0;
519*4882a593Smuzhiyun buffer_src1 = &reg->dma_buffer_src1;
520*4882a593Smuzhiyun buffer_dst = &reg->dma_buffer_dst;
521*4882a593Smuzhiyun buffer_els = &reg->dma_buffer_els;
522*4882a593Smuzhiyun
523*4882a593Smuzhiyun if ((int)req->src.yrgb_addr > 0) {
524*4882a593Smuzhiyun hdl = ion_import_dma_buf(rga2_drvdata->ion_client,
525*4882a593Smuzhiyun req->src.yrgb_addr);
526*4882a593Smuzhiyun if (IS_ERR(hdl)) {
527*4882a593Smuzhiyun ret = PTR_ERR(hdl);
528*4882a593Smuzhiyun pr_err("RGA2 SRC ERROR ion buf handle\n");
529*4882a593Smuzhiyun return ret;
530*4882a593Smuzhiyun }
531*4882a593Smuzhiyun #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
532*4882a593Smuzhiyun if (RGA2_CHECK_MODE) {
533*4882a593Smuzhiyun vaddr = ion_map_kernel(rga2_drvdata->ion_client, hdl);
534*4882a593Smuzhiyun if (vaddr)
535*4882a593Smuzhiyun rga2_memory_check(vaddr, req->src.vir_w, req->src.vir_h,
536*4882a593Smuzhiyun req->src.format, req->src.yrgb_addr);
537*4882a593Smuzhiyun ion_unmap_kernel(rga2_drvdata->ion_client, hdl);
538*4882a593Smuzhiyun }
539*4882a593Smuzhiyun #endif
540*4882a593Smuzhiyun if (req->mmu_info.src0_mmu_flag) {
541*4882a593Smuzhiyun buffer_src0->sgt =
542*4882a593Smuzhiyun ion_sg_table(rga2_drvdata->ion_client, hdl);
543*4882a593Smuzhiyun req->src.yrgb_addr = req->src.uv_addr;
544*4882a593Smuzhiyun req->src.uv_addr =
545*4882a593Smuzhiyun req->src.yrgb_addr + (src_vir_w * req->src.vir_h);
546*4882a593Smuzhiyun req->src.v_addr =
547*4882a593Smuzhiyun req->src.uv_addr + (src_vir_w * req->src.vir_h) / 4;
548*4882a593Smuzhiyun } else {
549*4882a593Smuzhiyun ion_phys(rga2_drvdata->ion_client, hdl, &phy_addr, &len);
550*4882a593Smuzhiyun req->src.yrgb_addr = phy_addr;
551*4882a593Smuzhiyun req->src.uv_addr =
552*4882a593Smuzhiyun req->src.yrgb_addr + (src_vir_w * req->src.vir_h);
553*4882a593Smuzhiyun req->src.v_addr =
554*4882a593Smuzhiyun req->src.uv_addr + (src_vir_w * req->src.vir_h) / 4;
555*4882a593Smuzhiyun }
556*4882a593Smuzhiyun ion_free(rga2_drvdata->ion_client, hdl);
557*4882a593Smuzhiyun } else {
558*4882a593Smuzhiyun req->src.yrgb_addr = req->src.uv_addr;
559*4882a593Smuzhiyun req->src.uv_addr =
560*4882a593Smuzhiyun req->src.yrgb_addr + (src_vir_w * req->src.vir_h);
561*4882a593Smuzhiyun req->src.v_addr =
562*4882a593Smuzhiyun req->src.uv_addr + (src_vir_w * req->src.vir_h) / 4;
563*4882a593Smuzhiyun }
564*4882a593Smuzhiyun
565*4882a593Smuzhiyun if ((int)req->dst.yrgb_addr > 0) {
566*4882a593Smuzhiyun hdl = ion_import_dma_buf(rga2_drvdata->ion_client,
567*4882a593Smuzhiyun req->dst.yrgb_addr);
568*4882a593Smuzhiyun if (IS_ERR(hdl)) {
569*4882a593Smuzhiyun ret = PTR_ERR(hdl);
570*4882a593Smuzhiyun pr_err("RGA2 DST ERROR ion buf handle\n");
571*4882a593Smuzhiyun return ret;
572*4882a593Smuzhiyun }
573*4882a593Smuzhiyun #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
574*4882a593Smuzhiyun if (RGA2_CHECK_MODE) {
575*4882a593Smuzhiyun vaddr = ion_map_kernel(rga2_drvdata->ion_client, hdl);
576*4882a593Smuzhiyun if (vaddr)
577*4882a593Smuzhiyun rga2_memory_check(vaddr, req->dst.vir_w, req->dst.vir_h,
578*4882a593Smuzhiyun req->dst.format, req->dst.yrgb_addr);
579*4882a593Smuzhiyun ion_unmap_kernel(rga2_drvdata->ion_client, hdl);
580*4882a593Smuzhiyun }
581*4882a593Smuzhiyun #endif
582*4882a593Smuzhiyun if (req->mmu_info.dst_mmu_flag) {
583*4882a593Smuzhiyun buffer_dst->sgt =
584*4882a593Smuzhiyun ion_sg_table(rga2_drvdata->ion_client, hdl);
585*4882a593Smuzhiyun req->dst.yrgb_addr = req->dst.uv_addr;
586*4882a593Smuzhiyun req->dst.uv_addr =
587*4882a593Smuzhiyun req->dst.yrgb_addr + (dst_vir_w * req->dst.vir_h);
588*4882a593Smuzhiyun req->dst.v_addr =
589*4882a593Smuzhiyun req->dst.uv_addr + (dst_vir_w * req->dst.vir_h) / 4;
590*4882a593Smuzhiyun } else {
591*4882a593Smuzhiyun ion_phys(rga2_drvdata->ion_client, hdl, &phy_addr, &len);
592*4882a593Smuzhiyun req->dst.yrgb_addr = phy_addr;
593*4882a593Smuzhiyun req->dst.uv_addr =
594*4882a593Smuzhiyun req->dst.yrgb_addr + (dst_vir_w * req->dst.vir_h);
595*4882a593Smuzhiyun req->dst.v_addr =
596*4882a593Smuzhiyun req->dst.uv_addr + (dst_vir_w * req->dst.vir_h) / 4;
597*4882a593Smuzhiyun }
598*4882a593Smuzhiyun ion_free(rga2_drvdata->ion_client, hdl);
599*4882a593Smuzhiyun } else {
600*4882a593Smuzhiyun req->dst.yrgb_addr = req->dst.uv_addr;
601*4882a593Smuzhiyun req->dst.uv_addr =
602*4882a593Smuzhiyun req->dst.yrgb_addr + (dst_vir_w * req->dst.vir_h);
603*4882a593Smuzhiyun req->dst.v_addr =
604*4882a593Smuzhiyun req->dst.uv_addr + (dst_vir_w * req->dst.vir_h) / 4;
605*4882a593Smuzhiyun }
606*4882a593Smuzhiyun
607*4882a593Smuzhiyun if ((int)req->src1.yrgb_addr > 0) {
608*4882a593Smuzhiyun hdl = ion_import_dma_buf(rga2_drvdata->ion_client,
609*4882a593Smuzhiyun req->src1.yrgb_addr);
610*4882a593Smuzhiyun if (IS_ERR(hdl)) {
611*4882a593Smuzhiyun ret = PTR_ERR(hdl);
612*4882a593Smuzhiyun pr_err("RGA2 ERROR ion buf handle\n");
613*4882a593Smuzhiyun return ret;
614*4882a593Smuzhiyun }
615*4882a593Smuzhiyun if (req->mmu_info.src1_mmu_flag) {
616*4882a593Smuzhiyun buffer_src1->sgt =
617*4882a593Smuzhiyun ion_sg_table(rga2_drvdata->ion_client, hdl);
618*4882a593Smuzhiyun req->src1.yrgb_addr = req->src1.uv_addr;
619*4882a593Smuzhiyun req->src1.uv_addr =
620*4882a593Smuzhiyun req->src1.yrgb_addr + (req->src1.vir_w * req->src1.vir_h);
621*4882a593Smuzhiyun req->src1.v_addr =
622*4882a593Smuzhiyun req->src1.uv_addr + (req->src1.vir_w * req->src1.vir_h) / 4;
623*4882a593Smuzhiyun } else {
624*4882a593Smuzhiyun ion_phys(rga2_drvdata->ion_client, hdl, &phy_addr, &len);
625*4882a593Smuzhiyun req->src1.yrgb_addr = phy_addr;
626*4882a593Smuzhiyun req->src1.uv_addr =
627*4882a593Smuzhiyun req->src1.yrgb_addr + (req->src1.vir_w * req->src1.vir_h);
628*4882a593Smuzhiyun req->src1.v_addr =
629*4882a593Smuzhiyun req->src1.uv_addr + (req->src1.vir_w * req->src1.vir_h) / 4;
630*4882a593Smuzhiyun }
631*4882a593Smuzhiyun ion_free(rga2_drvdata->ion_client, hdl);
632*4882a593Smuzhiyun } else {
633*4882a593Smuzhiyun req->src1.yrgb_addr = req->src1.uv_addr;
634*4882a593Smuzhiyun req->src1.uv_addr =
635*4882a593Smuzhiyun req->src1.yrgb_addr + (req->src1.vir_w * req->src1.vir_h);
636*4882a593Smuzhiyun req->src1.v_addr =
637*4882a593Smuzhiyun req->src1.uv_addr + (req->src1.vir_w * req->src1.vir_h) / 4;
638*4882a593Smuzhiyun }
639*4882a593Smuzhiyun if (rga2_is_yuv422p_format(req->src.format))
640*4882a593Smuzhiyun req->src.v_addr = req->src.uv_addr + (req->src.vir_w * req->src.vir_h) / 2;
641*4882a593Smuzhiyun if (rga2_is_yuv422p_format(req->dst.format))
642*4882a593Smuzhiyun req->dst.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h) / 2;
643*4882a593Smuzhiyun if (rga2_is_yuv422p_format(req->src1.format))
644*4882a593Smuzhiyun req->src1.v_addr = req->src1.uv_addr + (req->src1.vir_w * req->src1.vir_h) / 2;
645*4882a593Smuzhiyun
646*4882a593Smuzhiyun return 0;
647*4882a593Smuzhiyun }
648*4882a593Smuzhiyun
649*4882a593Smuzhiyun /* When the kernel version is lower than 4.4, no put buffer operation is required. */
650*4882a593Smuzhiyun void rga2_put_dma_info(struct rga2_reg *reg) {}
651*4882a593Smuzhiyun #endif
652*4882a593Smuzhiyun
653*4882a593Smuzhiyun static int rga2_mmu_buf_get(struct rga2_mmu_buf_t *t, uint32_t size)
654*4882a593Smuzhiyun {
655*4882a593Smuzhiyun mutex_lock(&rga2_service.lock);
656*4882a593Smuzhiyun t->front += size;
657*4882a593Smuzhiyun mutex_unlock(&rga2_service.lock);
658*4882a593Smuzhiyun
659*4882a593Smuzhiyun return 0;
660*4882a593Smuzhiyun }
661*4882a593Smuzhiyun
662*4882a593Smuzhiyun static int rga2_mmu_buf_get_try(struct rga2_mmu_buf_t *t, uint32_t size)
663*4882a593Smuzhiyun {
664*4882a593Smuzhiyun int ret = 0;
665*4882a593Smuzhiyun
666*4882a593Smuzhiyun mutex_lock(&rga2_service.lock);
667*4882a593Smuzhiyun if ((t->back - t->front) > t->size) {
668*4882a593Smuzhiyun if (t->front + size > t->back - t->size) {
669*4882a593Smuzhiyun pr_info("front %d, back %d dsize %d size %d",
670*4882a593Smuzhiyun t->front, t->back, t->size, size);
671*4882a593Smuzhiyun ret = -ENOMEM;
672*4882a593Smuzhiyun goto out;
673*4882a593Smuzhiyun }
674*4882a593Smuzhiyun } else {
675*4882a593Smuzhiyun if ((t->front + size) > t->back) {
676*4882a593Smuzhiyun pr_info("front %d, back %d dsize %d size %d",
677*4882a593Smuzhiyun t->front, t->back, t->size, size);
678*4882a593Smuzhiyun ret = -ENOMEM;
679*4882a593Smuzhiyun goto out;
680*4882a593Smuzhiyun }
681*4882a593Smuzhiyun
682*4882a593Smuzhiyun if (t->front + size > t->size) {
683*4882a593Smuzhiyun if (size > (t->back - t->size)) {
684*4882a593Smuzhiyun pr_info("front %d, back %d dsize %d size %d",
685*4882a593Smuzhiyun t->front, t->back, t->size, size);
686*4882a593Smuzhiyun ret = -ENOMEM;
687*4882a593Smuzhiyun goto out;
688*4882a593Smuzhiyun }
689*4882a593Smuzhiyun t->front = 0;
690*4882a593Smuzhiyun }
691*4882a593Smuzhiyun }
692*4882a593Smuzhiyun out:
693*4882a593Smuzhiyun mutex_unlock(&rga2_service.lock);
694*4882a593Smuzhiyun return ret;
695*4882a593Smuzhiyun }
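/*
 * Rough model of the reservation scheme (front/back/size counted in
 * page-table entries): rga2_mmu_buf_get_try() only verifies that `size`
 * more entries can be written at the current cursor (front) without
 * overtaking the consumer position tracked through back, resetting front
 * to 0 when the request would run past the end of the buffer; the cursor
 * is actually advanced later by rga2_mmu_buf_get() once the page table
 * has been filled in.
 */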
696*4882a593Smuzhiyun
697*4882a593Smuzhiyun static int rga2_mem_size_cal(unsigned long Mem, uint32_t MemSize, unsigned long *StartAddr)
698*4882a593Smuzhiyun {
699*4882a593Smuzhiyun unsigned long start, end;
700*4882a593Smuzhiyun uint32_t pageCount;
701*4882a593Smuzhiyun
702*4882a593Smuzhiyun end = (Mem + (MemSize + PAGE_SIZE - 1)) >> PAGE_SHIFT;
703*4882a593Smuzhiyun start = Mem >> PAGE_SHIFT;
704*4882a593Smuzhiyun pageCount = end - start;
705*4882a593Smuzhiyun *StartAddr = start;
706*4882a593Smuzhiyun return pageCount;
707*4882a593Smuzhiyun }
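/*
 * Worked example (illustrative, 4KiB pages): Mem = 0x10000800 and
 * MemSize = 0x2000 give start = 0x10000 and
 * end = (0x10000800 + 0x2FFF) >> 12 = 0x10003, so 3 pages are reported:
 * the buffer touches pages 0x10000, 0x10001 and 0x10002.
 */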
708*4882a593Smuzhiyun
709*4882a593Smuzhiyun static int rga2_buf_size_cal(unsigned long yrgb_addr, unsigned long uv_addr, unsigned long v_addr,
710*4882a593Smuzhiyun int format, uint32_t w, uint32_t h, unsigned long *StartAddr)
711*4882a593Smuzhiyun {
712*4882a593Smuzhiyun uint32_t size_yrgb = 0;
713*4882a593Smuzhiyun uint32_t size_uv = 0;
714*4882a593Smuzhiyun uint32_t size_v = 0;
715*4882a593Smuzhiyun uint32_t stride = 0;
716*4882a593Smuzhiyun unsigned long start, end;
717*4882a593Smuzhiyun uint32_t pageCount;
718*4882a593Smuzhiyun
719*4882a593Smuzhiyun switch(format)
720*4882a593Smuzhiyun {
721*4882a593Smuzhiyun case RGA2_FORMAT_RGBA_8888 :
722*4882a593Smuzhiyun case RGA2_FORMAT_RGBX_8888 :
723*4882a593Smuzhiyun case RGA2_FORMAT_BGRA_8888 :
724*4882a593Smuzhiyun case RGA2_FORMAT_BGRX_8888 :
725*4882a593Smuzhiyun case RGA2_FORMAT_ARGB_8888 :
726*4882a593Smuzhiyun case RGA2_FORMAT_XRGB_8888 :
727*4882a593Smuzhiyun case RGA2_FORMAT_ABGR_8888 :
728*4882a593Smuzhiyun case RGA2_FORMAT_XBGR_8888 :
729*4882a593Smuzhiyun stride = (w * 4 + 3) & (~3);
730*4882a593Smuzhiyun size_yrgb = stride*h;
731*4882a593Smuzhiyun start = yrgb_addr >> PAGE_SHIFT;
732*4882a593Smuzhiyun end = yrgb_addr + size_yrgb;
733*4882a593Smuzhiyun end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
734*4882a593Smuzhiyun pageCount = end - start;
735*4882a593Smuzhiyun break;
736*4882a593Smuzhiyun case RGA2_FORMAT_RGB_888 :
737*4882a593Smuzhiyun case RGA2_FORMAT_BGR_888 :
738*4882a593Smuzhiyun stride = (w * 3 + 3) & (~3);
739*4882a593Smuzhiyun size_yrgb = stride*h;
740*4882a593Smuzhiyun start = yrgb_addr >> PAGE_SHIFT;
741*4882a593Smuzhiyun end = yrgb_addr + size_yrgb;
742*4882a593Smuzhiyun end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
743*4882a593Smuzhiyun pageCount = end - start;
744*4882a593Smuzhiyun break;
745*4882a593Smuzhiyun case RGA2_FORMAT_RGB_565 :
746*4882a593Smuzhiyun case RGA2_FORMAT_RGBA_5551 :
747*4882a593Smuzhiyun case RGA2_FORMAT_RGBA_4444 :
748*4882a593Smuzhiyun case RGA2_FORMAT_BGR_565 :
749*4882a593Smuzhiyun case RGA2_FORMAT_BGRA_5551 :
750*4882a593Smuzhiyun case RGA2_FORMAT_BGRA_4444 :
751*4882a593Smuzhiyun case RGA2_FORMAT_ARGB_5551 :
752*4882a593Smuzhiyun case RGA2_FORMAT_ARGB_4444 :
753*4882a593Smuzhiyun case RGA2_FORMAT_ABGR_5551 :
754*4882a593Smuzhiyun case RGA2_FORMAT_ABGR_4444 :
755*4882a593Smuzhiyun stride = (w*2 + 3) & (~3);
756*4882a593Smuzhiyun size_yrgb = stride * h;
757*4882a593Smuzhiyun start = yrgb_addr >> PAGE_SHIFT;
758*4882a593Smuzhiyun end = yrgb_addr + size_yrgb;
759*4882a593Smuzhiyun end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
760*4882a593Smuzhiyun pageCount = end - start;
761*4882a593Smuzhiyun break;
762*4882a593Smuzhiyun
763*4882a593Smuzhiyun /* YUV FORMAT */
764*4882a593Smuzhiyun case RGA2_FORMAT_YCbCr_422_SP :
765*4882a593Smuzhiyun case RGA2_FORMAT_YCrCb_422_SP :
766*4882a593Smuzhiyun stride = (w + 3) & (~3);
767*4882a593Smuzhiyun size_yrgb = stride * h;
768*4882a593Smuzhiyun size_uv = stride * h;
769*4882a593Smuzhiyun start = MIN(yrgb_addr, uv_addr);
770*4882a593Smuzhiyun start >>= PAGE_SHIFT;
771*4882a593Smuzhiyun end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
772*4882a593Smuzhiyun end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
773*4882a593Smuzhiyun pageCount = end - start;
774*4882a593Smuzhiyun break;
775*4882a593Smuzhiyun case RGA2_FORMAT_YCbCr_422_P :
776*4882a593Smuzhiyun case RGA2_FORMAT_YCrCb_422_P :
777*4882a593Smuzhiyun stride = (w + 3) & (~3);
778*4882a593Smuzhiyun size_yrgb = stride * h;
779*4882a593Smuzhiyun size_uv = ((stride >> 1) * h);
780*4882a593Smuzhiyun size_v = ((stride >> 1) * h);
781*4882a593Smuzhiyun start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
782*4882a593Smuzhiyun start = start >> PAGE_SHIFT;
783*4882a593Smuzhiyun end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
784*4882a593Smuzhiyun end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
785*4882a593Smuzhiyun pageCount = end - start;
786*4882a593Smuzhiyun break;
787*4882a593Smuzhiyun case RGA2_FORMAT_YCbCr_420_SP :
788*4882a593Smuzhiyun case RGA2_FORMAT_YCrCb_420_SP :
789*4882a593Smuzhiyun stride = (w + 3) & (~3);
790*4882a593Smuzhiyun size_yrgb = stride * h;
791*4882a593Smuzhiyun size_uv = (stride * (h >> 1));
792*4882a593Smuzhiyun start = MIN(yrgb_addr, uv_addr);
793*4882a593Smuzhiyun start >>= PAGE_SHIFT;
794*4882a593Smuzhiyun end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
795*4882a593Smuzhiyun end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
796*4882a593Smuzhiyun pageCount = end - start;
797*4882a593Smuzhiyun break;
798*4882a593Smuzhiyun case RGA2_FORMAT_YCbCr_420_P :
799*4882a593Smuzhiyun case RGA2_FORMAT_YCrCb_420_P :
800*4882a593Smuzhiyun stride = (w + 3) & (~3);
801*4882a593Smuzhiyun size_yrgb = stride * h;
802*4882a593Smuzhiyun size_uv = ((stride >> 1) * (h >> 1));
803*4882a593Smuzhiyun size_v = ((stride >> 1) * (h >> 1));
804*4882a593Smuzhiyun start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
805*4882a593Smuzhiyun start >>= PAGE_SHIFT;
806*4882a593Smuzhiyun end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
807*4882a593Smuzhiyun end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
808*4882a593Smuzhiyun pageCount = end - start;
809*4882a593Smuzhiyun break;
810*4882a593Smuzhiyun case RGA2_FORMAT_YCbCr_400:
811*4882a593Smuzhiyun stride = (w + 3) & (~3);
812*4882a593Smuzhiyun size_yrgb = stride * h;
813*4882a593Smuzhiyun size_uv = 0;
814*4882a593Smuzhiyun size_v = 0;
815*4882a593Smuzhiyun start = yrgb_addr >> PAGE_SHIFT;
816*4882a593Smuzhiyun end = yrgb_addr + size_yrgb;
817*4882a593Smuzhiyun end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
818*4882a593Smuzhiyun pageCount = end - start;
819*4882a593Smuzhiyun break;
820*4882a593Smuzhiyun case RGA2_FORMAT_Y4:
821*4882a593Smuzhiyun stride = ((w + 3) & (~3) ) >> 1;
822*4882a593Smuzhiyun size_yrgb = stride * h;
823*4882a593Smuzhiyun size_uv = 0;
824*4882a593Smuzhiyun size_v = 0;
825*4882a593Smuzhiyun start = yrgb_addr >> PAGE_SHIFT;
826*4882a593Smuzhiyun end = yrgb_addr + size_yrgb;
827*4882a593Smuzhiyun end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
828*4882a593Smuzhiyun pageCount = end - start;
829*4882a593Smuzhiyun break;
830*4882a593Smuzhiyun case RGA2_FORMAT_YVYU_422:
831*4882a593Smuzhiyun case RGA2_FORMAT_VYUY_422:
832*4882a593Smuzhiyun case RGA2_FORMAT_YUYV_422:
833*4882a593Smuzhiyun case RGA2_FORMAT_UYVY_422:
834*4882a593Smuzhiyun stride = (w + 3) & (~3);
835*4882a593Smuzhiyun size_yrgb = stride * h;
836*4882a593Smuzhiyun size_uv = stride * h;
837*4882a593Smuzhiyun start = MIN(yrgb_addr, uv_addr);
838*4882a593Smuzhiyun start >>= PAGE_SHIFT;
839*4882a593Smuzhiyun end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
840*4882a593Smuzhiyun end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
841*4882a593Smuzhiyun pageCount = end - start;
842*4882a593Smuzhiyun break;
843*4882a593Smuzhiyun case RGA2_FORMAT_YVYU_420:
844*4882a593Smuzhiyun case RGA2_FORMAT_VYUY_420:
845*4882a593Smuzhiyun case RGA2_FORMAT_YUYV_420:
846*4882a593Smuzhiyun case RGA2_FORMAT_UYVY_420:
847*4882a593Smuzhiyun stride = (w + 3) & (~3);
848*4882a593Smuzhiyun size_yrgb = stride * h;
849*4882a593Smuzhiyun size_uv = (stride * (h >> 1));
850*4882a593Smuzhiyun start = MIN(yrgb_addr, uv_addr);
851*4882a593Smuzhiyun start >>= PAGE_SHIFT;
852*4882a593Smuzhiyun end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
853*4882a593Smuzhiyun end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
854*4882a593Smuzhiyun pageCount = end - start;
855*4882a593Smuzhiyun break;
856*4882a593Smuzhiyun #if 0
857*4882a593Smuzhiyun case RK_FORMAT_BPP1 :
858*4882a593Smuzhiyun break;
859*4882a593Smuzhiyun case RK_FORMAT_BPP2 :
860*4882a593Smuzhiyun break;
861*4882a593Smuzhiyun case RK_FORMAT_BPP4 :
862*4882a593Smuzhiyun break;
863*4882a593Smuzhiyun case RK_FORMAT_BPP8 :
864*4882a593Smuzhiyun break;
865*4882a593Smuzhiyun #endif
866*4882a593Smuzhiyun case RGA2_FORMAT_YCbCr_420_SP_10B:
867*4882a593Smuzhiyun case RGA2_FORMAT_YCrCb_420_SP_10B:
868*4882a593Smuzhiyun stride = (w + 3) & (~3);
869*4882a593Smuzhiyun size_yrgb = stride * h;
870*4882a593Smuzhiyun size_uv = (stride * (h >> 1));
871*4882a593Smuzhiyun start = MIN(yrgb_addr, uv_addr);
872*4882a593Smuzhiyun start >>= PAGE_SHIFT;
873*4882a593Smuzhiyun end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
874*4882a593Smuzhiyun end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
875*4882a593Smuzhiyun pageCount = end - start;
876*4882a593Smuzhiyun break;
877*4882a593Smuzhiyun default :
878*4882a593Smuzhiyun pageCount = 0;
879*4882a593Smuzhiyun start = 0;
880*4882a593Smuzhiyun break;
881*4882a593Smuzhiyun }
882*4882a593Smuzhiyun
883*4882a593Smuzhiyun *StartAddr = start;
884*4882a593Smuzhiyun return pageCount;
885*4882a593Smuzhiyun }
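/*
 * Worked example (illustrative, 4KiB pages): a 1920x1080 YCbCr_420_SP
 * buffer whose UV plane directly follows the Y plane needs
 * size_yrgb = 1920 * 1080 = 2073600 bytes plus size_uv = 1036800 bytes,
 * i.e. roughly ceil(3110400 / 4096) = 760 pages starting at the page
 * that contains yrgb_addr.
 */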
886*4882a593Smuzhiyun
887*4882a593Smuzhiyun static int rga2_MapUserMemory(struct page **pages, uint32_t *pageTable,
888*4882a593Smuzhiyun unsigned long Memory, uint32_t pageCount,
889*4882a593Smuzhiyun int writeFlag, int map)
890*4882a593Smuzhiyun {
891*4882a593Smuzhiyun struct vm_area_struct *vma;
892*4882a593Smuzhiyun int32_t result;
893*4882a593Smuzhiyun uint32_t i;
894*4882a593Smuzhiyun uint32_t status;
895*4882a593Smuzhiyun unsigned long Address;
896*4882a593Smuzhiyun unsigned long pfn;
897*4882a593Smuzhiyun struct page __maybe_unused *page;
898*4882a593Smuzhiyun spinlock_t * ptl;
899*4882a593Smuzhiyun pte_t * pte;
900*4882a593Smuzhiyun pgd_t * pgd;
901*4882a593Smuzhiyun #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
902*4882a593Smuzhiyun p4d_t * p4d;
903*4882a593Smuzhiyun #endif
904*4882a593Smuzhiyun pud_t * pud;
905*4882a593Smuzhiyun pmd_t * pmd;
906*4882a593Smuzhiyun
907*4882a593Smuzhiyun status = 0;
908*4882a593Smuzhiyun Address = 0;
909*4882a593Smuzhiyun
910*4882a593Smuzhiyun #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
911*4882a593Smuzhiyun mmap_read_lock(current->mm);
912*4882a593Smuzhiyun #else
913*4882a593Smuzhiyun down_read(&current->mm->mmap_sem);
914*4882a593Smuzhiyun #endif
915*4882a593Smuzhiyun
916*4882a593Smuzhiyun #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 168) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
917*4882a593Smuzhiyun result = get_user_pages(current, current->mm, Memory << PAGE_SHIFT,
918*4882a593Smuzhiyun pageCount, writeFlag ? FOLL_WRITE : 0,
919*4882a593Smuzhiyun pages, NULL);
920*4882a593Smuzhiyun #elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
921*4882a593Smuzhiyun result = get_user_pages(current, current->mm, Memory << PAGE_SHIFT,
922*4882a593Smuzhiyun pageCount, writeFlag, 0, pages, NULL);
923*4882a593Smuzhiyun #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
924*4882a593Smuzhiyun result = get_user_pages_remote(current, current->mm,
925*4882a593Smuzhiyun Memory << PAGE_SHIFT,
926*4882a593Smuzhiyun pageCount, writeFlag, pages, NULL, NULL);
927*4882a593Smuzhiyun #else
928*4882a593Smuzhiyun result = get_user_pages_remote(current->mm, Memory << PAGE_SHIFT,
929*4882a593Smuzhiyun pageCount, writeFlag, pages, NULL, NULL);
930*4882a593Smuzhiyun #endif
931*4882a593Smuzhiyun
932*4882a593Smuzhiyun if (result > 0 && result >= pageCount) {
933*4882a593Smuzhiyun /* Fill the page table. */
934*4882a593Smuzhiyun for (i = 0; i < pageCount; i++) {
935*4882a593Smuzhiyun /* Get the physical address from page struct. */
936*4882a593Smuzhiyun pageTable[i] = rga2_dma_flush_page(pages[i], map);
937*4882a593Smuzhiyun }
938*4882a593Smuzhiyun
939*4882a593Smuzhiyun for (i = 0; i < result; i++)
940*4882a593Smuzhiyun put_page(pages[i]);
941*4882a593Smuzhiyun #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
942*4882a593Smuzhiyun mmap_read_unlock(current->mm);
943*4882a593Smuzhiyun #else
944*4882a593Smuzhiyun up_read(&current->mm->mmap_sem);
945*4882a593Smuzhiyun #endif
946*4882a593Smuzhiyun return 0;
947*4882a593Smuzhiyun }
948*4882a593Smuzhiyun if (result > 0) {
949*4882a593Smuzhiyun for (i = 0; i < result; i++)
950*4882a593Smuzhiyun put_page(pages[i]);
951*4882a593Smuzhiyun }
952*4882a593Smuzhiyun for (i = 0; i < pageCount; i++) {
953*4882a593Smuzhiyun vma = find_vma(current->mm, (Memory + i) << PAGE_SHIFT);
954*4882a593Smuzhiyun if (!vma) {
955*4882a593Smuzhiyun pr_err("RGA2 failed to get vma, result = %d, pageCount = %d\n",
956*4882a593Smuzhiyun result, pageCount);
957*4882a593Smuzhiyun status = RGA2_OUT_OF_RESOURCES;
958*4882a593Smuzhiyun break;
959*4882a593Smuzhiyun }
960*4882a593Smuzhiyun pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);
961*4882a593Smuzhiyun if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) {
962*4882a593Smuzhiyun pr_err("RGA2 failed to get pgd, result = %d, pageCount = %d\n",
963*4882a593Smuzhiyun result, pageCount);
964*4882a593Smuzhiyun status = RGA2_OUT_OF_RESOURCES;
965*4882a593Smuzhiyun break;
966*4882a593Smuzhiyun }
967*4882a593Smuzhiyun #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
968*4882a593Smuzhiyun /* With folded four-level page tables, p4d_offset() does nothing and simply returns the pgd. */
969*4882a593Smuzhiyun p4d = p4d_offset(pgd, (Memory + i) << PAGE_SHIFT);
970*4882a593Smuzhiyun if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d))) {
971*4882a593Smuzhiyun pr_err("RGA2 failed to get p4d, result = %d, pageCount = %d\n",
972*4882a593Smuzhiyun result, pageCount);
973*4882a593Smuzhiyun status = RGA2_OUT_OF_RESOURCES;
974*4882a593Smuzhiyun break;
975*4882a593Smuzhiyun }
976*4882a593Smuzhiyun
977*4882a593Smuzhiyun pud = pud_offset(p4d, (Memory + i) << PAGE_SHIFT);
978*4882a593Smuzhiyun #else
979*4882a593Smuzhiyun pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);
980*4882a593Smuzhiyun #endif
981*4882a593Smuzhiyun if (pud_none(*pud) || unlikely(pud_bad(*pud))) {
982*4882a593Smuzhiyun pr_err("RGA2 failed to get pud, result = %d, pageCount = %d\n",
983*4882a593Smuzhiyun result, pageCount);
984*4882a593Smuzhiyun status = RGA2_OUT_OF_RESOURCES;
985*4882a593Smuzhiyun break;
986*4882a593Smuzhiyun }
987*4882a593Smuzhiyun pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
988*4882a593Smuzhiyun if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
989*4882a593Smuzhiyun pr_err("RGA2 failed to get pmd, result = %d, pageCount = %d\n",
990*4882a593Smuzhiyun result, pageCount);
991*4882a593Smuzhiyun status = RGA2_OUT_OF_RESOURCES;
992*4882a593Smuzhiyun break;
993*4882a593Smuzhiyun }
994*4882a593Smuzhiyun pte = pte_offset_map_lock(current->mm, pmd,
995*4882a593Smuzhiyun (Memory + i) << PAGE_SHIFT,
996*4882a593Smuzhiyun &ptl);
997*4882a593Smuzhiyun if (pte_none(*pte)) {
998*4882a593Smuzhiyun pr_err("RGA2 failed to get pte, result = %d, pageCount = %d\n",
999*4882a593Smuzhiyun result, pageCount);
1000*4882a593Smuzhiyun pte_unmap_unlock(pte, ptl);
1001*4882a593Smuzhiyun status = RGA2_OUT_OF_RESOURCES;
1002*4882a593Smuzhiyun break;
1003*4882a593Smuzhiyun }
1004*4882a593Smuzhiyun pfn = pte_pfn(*pte);
1005*4882a593Smuzhiyun Address = ((pfn << PAGE_SHIFT) |
1006*4882a593Smuzhiyun (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));
1007*4882a593Smuzhiyun
1008*4882a593Smuzhiyun pageTable[i] = rga2_dma_flush_page(phys_to_page(Address), map);
1009*4882a593Smuzhiyun
1010*4882a593Smuzhiyun pte_unmap_unlock(pte, ptl);
1011*4882a593Smuzhiyun }
1012*4882a593Smuzhiyun #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
1013*4882a593Smuzhiyun mmap_read_unlock(current->mm);
1014*4882a593Smuzhiyun #else
1015*4882a593Smuzhiyun up_read(&current->mm->mmap_sem);
1016*4882a593Smuzhiyun #endif
1017*4882a593Smuzhiyun return status;
1018*4882a593Smuzhiyun }
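/*
 * Summary (descriptive): the fast path pins the user pages with
 * get_user_pages*() and converts each one to a DMA address through
 * rga2_dma_flush_page(); when pinning fails (e.g. for mappings that
 * get_user_pages cannot handle), the fallback above walks the page
 * tables by hand (pgd/p4d/pud/pmd/pte) under the mmap lock to recover
 * the physical address of every page.
 */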
1019*4882a593Smuzhiyun
1020*4882a593Smuzhiyun static int rga2_MapION(struct sg_table *sg,
1021*4882a593Smuzhiyun uint32_t *Memory,
1022*4882a593Smuzhiyun int32_t pageCount)
1023*4882a593Smuzhiyun {
1024*4882a593Smuzhiyun uint32_t i;
1025*4882a593Smuzhiyun uint32_t status;
1026*4882a593Smuzhiyun unsigned long Address;
1027*4882a593Smuzhiyun uint32_t mapped_size = 0;
1028*4882a593Smuzhiyun uint32_t len;
1029*4882a593Smuzhiyun struct scatterlist *sgl = sg->sgl;
1030*4882a593Smuzhiyun uint32_t sg_num = 0;
1031*4882a593Smuzhiyun uint32_t break_flag = 0;
1032*4882a593Smuzhiyun
1033*4882a593Smuzhiyun status = 0;
1034*4882a593Smuzhiyun Address = 0;
1035*4882a593Smuzhiyun do {
1036*4882a593Smuzhiyun len = sg_dma_len(sgl) >> PAGE_SHIFT;
1037*4882a593Smuzhiyun /*
1038*4882a593Smuzhiyun * The fd passed from user space obtains its sg_table through
1039*4882a593Smuzhiyun * dma_buf_map_attachment(), so sg_dma_address() can be used here.
1040*4882a593Smuzhiyun */
1041*4882a593Smuzhiyun Address = sg_dma_address(sgl);
1042*4882a593Smuzhiyun
1043*4882a593Smuzhiyun for(i=0; i<len; i++) {
1044*4882a593Smuzhiyun if (mapped_size + i >= pageCount) {
1045*4882a593Smuzhiyun break_flag = 1;
1046*4882a593Smuzhiyun break;
1047*4882a593Smuzhiyun }
1048*4882a593Smuzhiyun Memory[mapped_size + i] = (uint32_t)(Address + (i << PAGE_SHIFT));
1049*4882a593Smuzhiyun }
1050*4882a593Smuzhiyun if (break_flag)
1051*4882a593Smuzhiyun break;
1052*4882a593Smuzhiyun mapped_size += len;
1053*4882a593Smuzhiyun sg_num += 1;
1054*4882a593Smuzhiyun }
1055*4882a593Smuzhiyun while((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));
1056*4882a593Smuzhiyun
1057*4882a593Smuzhiyun return 0;
1058*4882a593Smuzhiyun }
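/*
 * Descriptive note: rga2_MapION() flattens the scatter-gather list into a
 * linear page table, emitting one 4KiB-aligned DMA address per entry until
 * pageCount entries have been produced or the sg list is exhausted.
 */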
1059*4882a593Smuzhiyun
1060*4882a593Smuzhiyun static int rga2_mmu_flush_cache(struct rga2_reg *reg, struct rga2_req *req)
1061*4882a593Smuzhiyun {
1062*4882a593Smuzhiyun int DstMemSize;
1063*4882a593Smuzhiyun unsigned long DstStart, DstPageCount;
1064*4882a593Smuzhiyun uint32_t *MMU_Base, *MMU_Base_phys;
1065*4882a593Smuzhiyun int ret;
1066*4882a593Smuzhiyun int status;
1067*4882a593Smuzhiyun struct page **pages = NULL;
1068*4882a593Smuzhiyun struct rga_dma_buffer_t *dma_buffer = NULL;
1069*4882a593Smuzhiyun
1070*4882a593Smuzhiyun MMU_Base = NULL;
1071*4882a593Smuzhiyun DstMemSize = 0;
1072*4882a593Smuzhiyun DstPageCount = 0;
1073*4882a593Smuzhiyun DstStart = 0;
1074*4882a593Smuzhiyun
1075*4882a593Smuzhiyun if (reg->MMU_map != true) {
1076*4882a593Smuzhiyun status = -EINVAL;
1077*4882a593Smuzhiyun goto out;
1078*4882a593Smuzhiyun }
1079*4882a593Smuzhiyun
1080*4882a593Smuzhiyun /* cal dst buf mmu info */
1081*4882a593Smuzhiyun if (req->mmu_info.dst_mmu_flag & 1) {
1082*4882a593Smuzhiyun DstPageCount = rga2_buf_size_cal(req->dst.yrgb_addr,
1083*4882a593Smuzhiyun req->dst.uv_addr,
1084*4882a593Smuzhiyun req->dst.v_addr,
1085*4882a593Smuzhiyun req->dst.format,
1086*4882a593Smuzhiyun req->dst.vir_w,
1087*4882a593Smuzhiyun req->dst.vir_h,
1088*4882a593Smuzhiyun &DstStart);
1089*4882a593Smuzhiyun if (DstPageCount == 0)
1090*4882a593Smuzhiyun return -EINVAL;
1091*4882a593Smuzhiyun }
1092*4882a593Smuzhiyun /* Cal out the needed mem size */
1093*4882a593Smuzhiyun DstMemSize = (DstPageCount + 15) & (~15);
1094*4882a593Smuzhiyun
1095*4882a593Smuzhiyun if (rga2_mmu_buf_get_try(&rga2_mmu_buf, DstMemSize)) {
1096*4882a593Smuzhiyun pr_err("RGA2 Get MMU mem failed\n");
1097*4882a593Smuzhiyun status = RGA2_MALLOC_ERROR;
1098*4882a593Smuzhiyun goto out;
1099*4882a593Smuzhiyun }
1100*4882a593Smuzhiyun pages = rga2_mmu_buf.pages;
1101*4882a593Smuzhiyun mutex_lock(&rga2_service.lock);
1102*4882a593Smuzhiyun MMU_Base = rga2_mmu_buf.buf_virtual +
1103*4882a593Smuzhiyun (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
1104*4882a593Smuzhiyun MMU_Base_phys = rga2_mmu_buf.buf +
1105*4882a593Smuzhiyun (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
1106*4882a593Smuzhiyun
1107*4882a593Smuzhiyun mutex_unlock(&rga2_service.lock);
1108*4882a593Smuzhiyun if (DstMemSize) {
1109*4882a593Smuzhiyun dma_buffer = &reg->dma_buffer_dst;
1110*4882a593Smuzhiyun if (dma_buffer->sgt) {
1111*4882a593Smuzhiyun status = -EINVAL;
1112*4882a593Smuzhiyun goto out;
1113*4882a593Smuzhiyun } else {
1114*4882a593Smuzhiyun ret = rga2_MapUserMemory(&pages[0],
1115*4882a593Smuzhiyun MMU_Base,
1116*4882a593Smuzhiyun DstStart, DstPageCount, 1,
1117*4882a593Smuzhiyun MMU_MAP_CLEAN | MMU_MAP_INVALID);
1118*4882a593Smuzhiyun #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
1119*4882a593Smuzhiyun if (RGA2_CHECK_MODE)
1120*4882a593Smuzhiyun rga2_user_memory_check(&pages[0],
1121*4882a593Smuzhiyun req->dst.vir_w,
1122*4882a593Smuzhiyun req->dst.vir_h,
1123*4882a593Smuzhiyun req->dst.format,
1124*4882a593Smuzhiyun 2);
1125*4882a593Smuzhiyun #endif
1126*4882a593Smuzhiyun }
1127*4882a593Smuzhiyun if (ret < 0) {
1128*4882a593Smuzhiyun pr_err("rga2 unmap dst memory failed\n");
1129*4882a593Smuzhiyun status = ret;
1130*4882a593Smuzhiyun goto out;
1131*4882a593Smuzhiyun }
1132*4882a593Smuzhiyun }
1133*4882a593Smuzhiyun rga2_mmu_buf_get(&rga2_mmu_buf, DstMemSize);
1134*4882a593Smuzhiyun reg->MMU_len = DstMemSize;
1135*4882a593Smuzhiyun status = 0;
1136*4882a593Smuzhiyun out:
1137*4882a593Smuzhiyun return status;
1138*4882a593Smuzhiyun }
1139*4882a593Smuzhiyun
1140*4882a593Smuzhiyun static int rga2_mmu_info_BitBlt_mode(struct rga2_reg *reg, struct rga2_req *req)
1141*4882a593Smuzhiyun {
1142*4882a593Smuzhiyun int Src0MemSize, DstMemSize, Src1MemSize;
1143*4882a593Smuzhiyun unsigned long Src0Start, Src1Start, DstStart;
1144*4882a593Smuzhiyun unsigned long Src0PageCount, Src1PageCount, DstPageCount;
1145*4882a593Smuzhiyun uint32_t AllSize;
1146*4882a593Smuzhiyun uint32_t *MMU_Base, *MMU_Base_phys;
1147*4882a593Smuzhiyun int ret;
1148*4882a593Smuzhiyun int status;
1149*4882a593Smuzhiyun uint32_t uv_size, v_size;
1150*4882a593Smuzhiyun struct page **pages = NULL;
1151*4882a593Smuzhiyun struct rga_dma_buffer_t *dma_buffer = NULL;
1152*4882a593Smuzhiyun
1153*4882a593Smuzhiyun MMU_Base = NULL;
1154*4882a593Smuzhiyun Src0MemSize = 0;
1155*4882a593Smuzhiyun Src1MemSize = 0;
1156*4882a593Smuzhiyun DstMemSize = 0;
1157*4882a593Smuzhiyun Src0PageCount = 0;
1158*4882a593Smuzhiyun Src1PageCount = 0;
1159*4882a593Smuzhiyun DstPageCount = 0;
1160*4882a593Smuzhiyun Src0Start = 0;
1161*4882a593Smuzhiyun Src1Start = 0;
1162*4882a593Smuzhiyun DstStart = 0;
1163*4882a593Smuzhiyun
1164*4882a593Smuzhiyun /* cal src0 buf mmu info */
1165*4882a593Smuzhiyun if (req->mmu_info.src0_mmu_flag & 1) {
1166*4882a593Smuzhiyun Src0PageCount = rga2_buf_size_cal(req->src.yrgb_addr,
1167*4882a593Smuzhiyun req->src.uv_addr,
1168*4882a593Smuzhiyun req->src.v_addr,
1169*4882a593Smuzhiyun req->src.format,
1170*4882a593Smuzhiyun req->src.vir_w,
1171*4882a593Smuzhiyun (req->src.vir_h),
1172*4882a593Smuzhiyun &Src0Start);
1173*4882a593Smuzhiyun if (Src0PageCount == 0)
1174*4882a593Smuzhiyun return -EINVAL;
1175*4882a593Smuzhiyun }
1176*4882a593Smuzhiyun /* cal src1 buf mmu info */
1177*4882a593Smuzhiyun if (req->mmu_info.src1_mmu_flag & 1) {
1178*4882a593Smuzhiyun Src1PageCount = rga2_buf_size_cal(req->src1.yrgb_addr,
1179*4882a593Smuzhiyun req->src1.uv_addr,
1180*4882a593Smuzhiyun req->src1.v_addr,
1181*4882a593Smuzhiyun req->src1.format,
1182*4882a593Smuzhiyun req->src1.vir_w,
1183*4882a593Smuzhiyun (req->src1.vir_h),
1184*4882a593Smuzhiyun &Src1Start);
1185*4882a593Smuzhiyun if (Src1PageCount == 0)
1186*4882a593Smuzhiyun return -EINVAL;
1187*4882a593Smuzhiyun }
1188*4882a593Smuzhiyun /* cal dst buf mmu info */
1189*4882a593Smuzhiyun if (req->mmu_info.dst_mmu_flag & 1) {
1190*4882a593Smuzhiyun DstPageCount = rga2_buf_size_cal(req->dst.yrgb_addr,
1191*4882a593Smuzhiyun req->dst.uv_addr,
1192*4882a593Smuzhiyun req->dst.v_addr,
1193*4882a593Smuzhiyun req->dst.format,
1194*4882a593Smuzhiyun req->dst.vir_w,
1195*4882a593Smuzhiyun req->dst.vir_h,
1196*4882a593Smuzhiyun &DstStart);
1197*4882a593Smuzhiyun if (DstPageCount == 0)
1198*4882a593Smuzhiyun return -EINVAL;
1199*4882a593Smuzhiyun }
1200*4882a593Smuzhiyun /* calculate the total page-table memory needed */
1201*4882a593Smuzhiyun Src0MemSize = (Src0PageCount + 15) & (~15);
1202*4882a593Smuzhiyun Src1MemSize = (Src1PageCount + 15) & (~15);
1203*4882a593Smuzhiyun DstMemSize = (DstPageCount + 15) & (~15);
1204*4882a593Smuzhiyun AllSize = Src0MemSize + Src1MemSize + DstMemSize;
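/*
 * The tables will be laid out back to back as [src0 | src1 | dst];
 * each segment is rounded up to a multiple of 16 entries by the
 * "(count + 15) & (~15)" above, presumably so every per-buffer table
 * starts on an alignment the hardware MMU base registers expect.
 */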
1205*4882a593Smuzhiyun
1206*4882a593Smuzhiyun if (rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {
1207*4882a593Smuzhiyun pr_err("RGA2 Get MMU mem failed\n");
1208*4882a593Smuzhiyun status = RGA2_MALLOC_ERROR;
1209*4882a593Smuzhiyun goto out;
1210*4882a593Smuzhiyun }
1211*4882a593Smuzhiyun
1212*4882a593Smuzhiyun pages = rga2_mmu_buf.pages;
1213*4882a593Smuzhiyun if(pages == NULL) {
1214*4882a593Smuzhiyun pr_err("RGA MMU malloc pages mem failed\n");
1215*4882a593Smuzhiyun return -EINVAL;
1216*4882a593Smuzhiyun }
1217*4882a593Smuzhiyun
1218*4882a593Smuzhiyun mutex_lock(&rga2_service.lock);
1219*4882a593Smuzhiyun MMU_Base = rga2_mmu_buf.buf_virtual + rga2_mmu_buf.front;
1220*4882a593Smuzhiyun MMU_Base_phys = rga2_mmu_buf.buf + rga2_mmu_buf.front;
1221*4882a593Smuzhiyun mutex_unlock(&rga2_service.lock);
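/*
 * MMU_Base is the CPU-visible copy of the reserved slice and is used
 * to fill in the entries; MMU_Base_phys is the address that will be
 * programmed into the hardware. rga2_mmu_buf.front appears to be the
 * ring's write cursor, hence the rga2_service.lock around reading it.
 */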
1222*4882a593Smuzhiyun
1223*4882a593Smuzhiyun if (Src0MemSize) {
1224*4882a593Smuzhiyun dma_buffer = &reg->dma_buffer_src0;
1225*4882a593Smuzhiyun
1226*4882a593Smuzhiyun if (dma_buffer->sgt) {
1227*4882a593Smuzhiyun ret = rga2_MapION(dma_buffer->sgt,
1228*4882a593Smuzhiyun &MMU_Base[0], Src0MemSize);
1229*4882a593Smuzhiyun } else {
1230*4882a593Smuzhiyun ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0],
1231*4882a593Smuzhiyun Src0Start, Src0PageCount,
1232*4882a593Smuzhiyun 0, MMU_MAP_CLEAN);
1233*4882a593Smuzhiyun #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
1234*4882a593Smuzhiyun if (RGA2_CHECK_MODE)
1235*4882a593Smuzhiyun rga2_user_memory_check(&pages[0],
1236*4882a593Smuzhiyun req->src.vir_w,
1237*4882a593Smuzhiyun req->src.vir_h,
1238*4882a593Smuzhiyun req->src.format,
1239*4882a593Smuzhiyun 1);
1240*4882a593Smuzhiyun #endif
1241*4882a593Smuzhiyun
1242*4882a593Smuzhiyun /* Save the page table so the pages can be unmapped later. */
1243*4882a593Smuzhiyun reg->MMU_src0_base = MMU_Base;
1244*4882a593Smuzhiyun reg->MMU_src0_count = Src0PageCount;
1245*4882a593Smuzhiyun }
1246*4882a593Smuzhiyun
1247*4882a593Smuzhiyun if (ret < 0) {
1248*4882a593Smuzhiyun pr_err("rga2 map src0 memory failed\n");
1249*4882a593Smuzhiyun status = ret;
1250*4882a593Smuzhiyun goto out;
1251*4882a593Smuzhiyun }
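/*
 * The addresses handed to the hardware are rewritten as offsets into
 * the new page table: yrgb keeps only its in-page offset (with 4 KiB
 * pages, addr & 0xfff), while uv/v additionally carry their page index
 * relative to Src0Start in the bits above PAGE_SHIFT. Hypothetical
 * example: yrgb at 0x12345678 becomes 0x678.
 */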
1252*4882a593Smuzhiyun /* change the buf address in req struct */
1253*4882a593Smuzhiyun req->mmu_info.src0_base_addr = (((unsigned long)MMU_Base_phys));
1254*4882a593Smuzhiyun uv_size = (req->src.uv_addr
1255*4882a593Smuzhiyun - (Src0Start << PAGE_SHIFT)) >> PAGE_SHIFT;
1256*4882a593Smuzhiyun v_size = (req->src.v_addr
1257*4882a593Smuzhiyun - (Src0Start << PAGE_SHIFT)) >> PAGE_SHIFT;
1258*4882a593Smuzhiyun
1259*4882a593Smuzhiyun req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
1260*4882a593Smuzhiyun req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) |
1261*4882a593Smuzhiyun (uv_size << PAGE_SHIFT);
1262*4882a593Smuzhiyun req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) |
1263*4882a593Smuzhiyun (v_size << PAGE_SHIFT);
1264*4882a593Smuzhiyun }
1265*4882a593Smuzhiyun
1266*4882a593Smuzhiyun if (Src1MemSize) {
1267*4882a593Smuzhiyun dma_buffer = &reg->dma_buffer_src1;
1268*4882a593Smuzhiyun
1269*4882a593Smuzhiyun if (dma_buffer->sgt) {
1270*4882a593Smuzhiyun ret = rga2_MapION(dma_buffer->sgt,
1271*4882a593Smuzhiyun MMU_Base + Src0MemSize, Src1MemSize);
1272*4882a593Smuzhiyun } else {
1273*4882a593Smuzhiyun ret = rga2_MapUserMemory(&pages[0],
1274*4882a593Smuzhiyun MMU_Base + Src0MemSize,
1275*4882a593Smuzhiyun Src1Start, Src1PageCount,
1276*4882a593Smuzhiyun 0, MMU_MAP_CLEAN);
1277*4882a593Smuzhiyun
1278*4882a593Smuzhiyun /* Save the page table so the pages can be unmapped later. */
1279*4882a593Smuzhiyun reg->MMU_src1_base = MMU_Base + Src0MemSize;
1280*4882a593Smuzhiyun reg->MMU_src1_count = Src1PageCount;
1281*4882a593Smuzhiyun }
1282*4882a593Smuzhiyun if (ret < 0) {
1283*4882a593Smuzhiyun pr_err("rga2 map src1 memory failed\n");
1284*4882a593Smuzhiyun status = ret;
1285*4882a593Smuzhiyun goto out;
1286*4882a593Smuzhiyun }
1287*4882a593Smuzhiyun /* change the buf address in req struct */
1288*4882a593Smuzhiyun req->mmu_info.src1_base_addr = ((unsigned long)(MMU_Base_phys
1289*4882a593Smuzhiyun + Src0MemSize));
1290*4882a593Smuzhiyun req->src1.yrgb_addr = (req->src1.yrgb_addr & (~PAGE_MASK));
1291*4882a593Smuzhiyun }
1292*4882a593Smuzhiyun if (DstMemSize) {
1293*4882a593Smuzhiyun dma_buffer = &reg->dma_buffer_dst;
1294*4882a593Smuzhiyun
1295*4882a593Smuzhiyun if (dma_buffer->sgt) {
1296*4882a593Smuzhiyun ret = rga2_MapION(dma_buffer->sgt, MMU_Base + Src0MemSize
1297*4882a593Smuzhiyun + Src1MemSize, DstMemSize);
1298*4882a593Smuzhiyun } else if (req->alpha_mode_0 != 0 && req->bitblt_mode == 0) {
1299*4882a593Smuzhiyun /* The src + dst => dst blend mode reads and writes dst, so it needs both clean and invalidate */
1300*4882a593Smuzhiyun ret = rga2_MapUserMemory(&pages[0], MMU_Base
1301*4882a593Smuzhiyun + Src0MemSize + Src1MemSize,
1302*4882a593Smuzhiyun DstStart, DstPageCount, 1,
1303*4882a593Smuzhiyun MMU_MAP_CLEAN | MMU_MAP_INVALID);
1304*4882a593Smuzhiyun #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
1305*4882a593Smuzhiyun if (RGA2_CHECK_MODE)
1306*4882a593Smuzhiyun rga2_user_memory_check(&pages[0],
1307*4882a593Smuzhiyun req->dst.vir_w,
1308*4882a593Smuzhiyun req->dst.vir_h,
1309*4882a593Smuzhiyun req->dst.format,
1310*4882a593Smuzhiyun 2);
1311*4882a593Smuzhiyun #endif
1312*4882a593Smuzhiyun
1313*4882a593Smuzhiyun /* Save the page table so the cache can be invalidated and the pages unmapped later. */
1314*4882a593Smuzhiyun reg->MMU_dst_base = MMU_Base + Src0MemSize + Src1MemSize;
1315*4882a593Smuzhiyun reg->MMU_dst_count = DstPageCount;
1316*4882a593Smuzhiyun } else {
1317*4882a593Smuzhiyun ret = rga2_MapUserMemory(&pages[0], MMU_Base
1318*4882a593Smuzhiyun + Src0MemSize + Src1MemSize,
1319*4882a593Smuzhiyun DstStart, DstPageCount,
1320*4882a593Smuzhiyun 1, MMU_MAP_INVALID);
1321*4882a593Smuzhiyun #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
1322*4882a593Smuzhiyun if (RGA2_CHECK_MODE)
1323*4882a593Smuzhiyun rga2_user_memory_check(&pages[0],
1324*4882a593Smuzhiyun req->dst.vir_w,
1325*4882a593Smuzhiyun req->dst.vir_h,
1326*4882a593Smuzhiyun req->dst.format,
1327*4882a593Smuzhiyun 2);
1328*4882a593Smuzhiyun #endif
1329*4882a593Smuzhiyun
1330*4882a593Smuzhiyun /* Save the page table so the cache can be invalidated and the pages unmapped later. */
1331*4882a593Smuzhiyun reg->MMU_dst_base = MMU_Base + Src0MemSize + Src1MemSize;
1332*4882a593Smuzhiyun reg->MMU_dst_count = DstPageCount;
1333*4882a593Smuzhiyun }
1334*4882a593Smuzhiyun
1335*4882a593Smuzhiyun if (ret < 0) {
1336*4882a593Smuzhiyun pr_err("rga2 map dst memory failed\n");
1337*4882a593Smuzhiyun status = ret;
1338*4882a593Smuzhiyun goto out;
1339*4882a593Smuzhiyun }
1340*4882a593Smuzhiyun /* change the buf address in req struct */
1341*4882a593Smuzhiyun req->mmu_info.dst_base_addr = ((unsigned long)(MMU_Base_phys
1342*4882a593Smuzhiyun + Src0MemSize + Src1MemSize));
1343*4882a593Smuzhiyun req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));
1344*4882a593Smuzhiyun uv_size = (req->dst.uv_addr
1345*4882a593Smuzhiyun - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1346*4882a593Smuzhiyun v_size = (req->dst.v_addr
1347*4882a593Smuzhiyun - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1348*4882a593Smuzhiyun req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) |
1349*4882a593Smuzhiyun ((uv_size) << PAGE_SHIFT);
1350*4882a593Smuzhiyun req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) |
1351*4882a593Smuzhiyun ((v_size) << PAGE_SHIFT);
1352*4882a593Smuzhiyun
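/*
 * For a blend that both reads and writes dst (src + dst => dst), the
 * destination is also fetched through the src1 channel, so point the
 * src1 MMU at the dst page table below.
 */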
1353*4882a593Smuzhiyun if (((req->alpha_rop_flag & 1) == 1) && (req->bitblt_mode == 0)) {
1354*4882a593Smuzhiyun req->mmu_info.src1_base_addr = req->mmu_info.dst_base_addr;
1355*4882a593Smuzhiyun req->mmu_info.src1_mmu_flag = req->mmu_info.dst_mmu_flag;
1356*4882a593Smuzhiyun }
1357*4882a593Smuzhiyun }
1358*4882a593Smuzhiyun
1359*4882a593Smuzhiyun /* flush data to DDR */
1360*4882a593Smuzhiyun rga2_dma_flush_range(MMU_Base, (MMU_Base + AllSize));
1361*4882a593Smuzhiyun rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);
1362*4882a593Smuzhiyun reg->MMU_len = AllSize;
1363*4882a593Smuzhiyun status = 0;
1364*4882a593Smuzhiyun out:
1365*4882a593Smuzhiyun return status;
1366*4882a593Smuzhiyun }
1367*4882a593Smuzhiyun
1368*4882a593Smuzhiyun static int rga2_mmu_info_color_palette_mode(struct rga2_reg *reg, struct rga2_req *req)
1369*4882a593Smuzhiyun {
1370*4882a593Smuzhiyun int SrcMemSize, DstMemSize;
1371*4882a593Smuzhiyun unsigned long SrcStart, DstStart;
1372*4882a593Smuzhiyun unsigned long SrcPageCount, DstPageCount;
1373*4882a593Smuzhiyun struct page **pages = NULL;
1374*4882a593Smuzhiyun uint32_t uv_size, v_size;
1375*4882a593Smuzhiyun uint32_t AllSize;
1376*4882a593Smuzhiyun uint32_t *MMU_Base = NULL, *MMU_Base_phys;
1377*4882a593Smuzhiyun int ret, status;
1378*4882a593Smuzhiyun uint32_t stride;
1379*4882a593Smuzhiyun
1380*4882a593Smuzhiyun uint8_t shift;
1381*4882a593Smuzhiyun uint32_t sw, byte_num;
1382*4882a593Smuzhiyun struct rga_dma_buffer_t *dma_buffer = NULL;
1383*4882a593Smuzhiyun
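/*
 * palette_mode presumably selects the index depth (0..3 for 1/2/4/8
 * bits per pixel). shift converts the pixel count to bytes, and stride
 * rounds the byte count up to a 4-byte boundary. For example, a 64x32
 * surface in 1 bpp mode gives sw = 2048 pixels, byte_num = 256 and
 * stride = 256.
 */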
1384*4882a593Smuzhiyun shift = 3 - (req->palette_mode & 3);
1385*4882a593Smuzhiyun sw = req->src.vir_w*req->src.vir_h;
1386*4882a593Smuzhiyun byte_num = sw >> shift;
1387*4882a593Smuzhiyun stride = (byte_num + 3) & (~3);
1388*4882a593Smuzhiyun
1389*4882a593Smuzhiyun SrcStart = 0;
1390*4882a593Smuzhiyun DstStart = 0;
1391*4882a593Smuzhiyun SrcPageCount = 0;
1392*4882a593Smuzhiyun DstPageCount = 0;
1393*4882a593Smuzhiyun SrcMemSize = 0;
1394*4882a593Smuzhiyun DstMemSize = 0;
1395*4882a593Smuzhiyun
1396*4882a593Smuzhiyun do {
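/*
 * In color palette mode the indexed source bitmap appears to be
 * fetched through the ELS (palette/LUT) channel, so its MMU flag is
 * moved from src0 to els and the src0/src1 flags are cleared.
 */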
1397*4882a593Smuzhiyun if (req->mmu_info.src0_mmu_flag) {
1398*4882a593Smuzhiyun if (req->mmu_info.els_mmu_flag & 1) {
1399*4882a593Smuzhiyun req->mmu_info.src0_mmu_flag = 0;
1400*4882a593Smuzhiyun req->mmu_info.src1_mmu_flag = 0;
1401*4882a593Smuzhiyun } else {
1402*4882a593Smuzhiyun req->mmu_info.els_mmu_flag = req->mmu_info.src0_mmu_flag;
1403*4882a593Smuzhiyun req->mmu_info.src0_mmu_flag = 0;
1404*4882a593Smuzhiyun }
1405*4882a593Smuzhiyun
1406*4882a593Smuzhiyun SrcPageCount = rga2_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);
1407*4882a593Smuzhiyun if(SrcPageCount == 0) {
1408*4882a593Smuzhiyun return -EINVAL;
1409*4882a593Smuzhiyun }
1410*4882a593Smuzhiyun }
1411*4882a593Smuzhiyun
1412*4882a593Smuzhiyun if (req->mmu_info.dst_mmu_flag) {
1413*4882a593Smuzhiyun DstPageCount = rga2_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
1414*4882a593Smuzhiyun req->dst.format, req->dst.vir_w, req->dst.vir_h,
1415*4882a593Smuzhiyun &DstStart);
1416*4882a593Smuzhiyun if(DstPageCount == 0) {
1417*4882a593Smuzhiyun return -EINVAL;
1418*4882a593Smuzhiyun }
1419*4882a593Smuzhiyun }
1420*4882a593Smuzhiyun
1421*4882a593Smuzhiyun SrcMemSize = (SrcPageCount + 15) & (~15);
1422*4882a593Smuzhiyun DstMemSize = (DstPageCount + 15) & (~15);
1423*4882a593Smuzhiyun
1424*4882a593Smuzhiyun AllSize = SrcMemSize + DstMemSize;
1425*4882a593Smuzhiyun
1426*4882a593Smuzhiyun if (rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {
1427*4882a593Smuzhiyun pr_err("RGA2 Get MMU mem failed\n");
1428*4882a593Smuzhiyun status = RGA2_MALLOC_ERROR;
1429*4882a593Smuzhiyun break;
1430*4882a593Smuzhiyun }
1431*4882a593Smuzhiyun
1432*4882a593Smuzhiyun pages = rga2_mmu_buf.pages;
1433*4882a593Smuzhiyun if(pages == NULL) {
1434*4882a593Smuzhiyun pr_err("RGA MMU malloc pages mem failed\n");
1435*4882a593Smuzhiyun return -EINVAL;
1436*4882a593Smuzhiyun }
1437*4882a593Smuzhiyun
1438*4882a593Smuzhiyun mutex_lock(&rga2_service.lock);
1439*4882a593Smuzhiyun MMU_Base = rga2_mmu_buf.buf_virtual + rga2_mmu_buf.front;
1440*4882a593Smuzhiyun MMU_Base_phys = rga2_mmu_buf.buf + rga2_mmu_buf.front;
1441*4882a593Smuzhiyun mutex_unlock(&rga2_service.lock);
1442*4882a593Smuzhiyun
1443*4882a593Smuzhiyun if(SrcMemSize) {
1444*4882a593Smuzhiyun dma_buffer = &reg->dma_buffer_src0;
1445*4882a593Smuzhiyun
1446*4882a593Smuzhiyun if (dma_buffer->sgt) {
1447*4882a593Smuzhiyun ret = rga2_MapION(dma_buffer->sgt,
1448*4882a593Smuzhiyun &MMU_Base[0], SrcMemSize);
1449*4882a593Smuzhiyun } else {
1450*4882a593Smuzhiyun ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0],
1451*4882a593Smuzhiyun SrcStart, SrcPageCount, 0, MMU_MAP_CLEAN);
1452*4882a593Smuzhiyun #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
1453*4882a593Smuzhiyun if (RGA2_CHECK_MODE)
1454*4882a593Smuzhiyun rga2_user_memory_check(&pages[0], req->src.vir_w,
1455*4882a593Smuzhiyun req->src.vir_h, req->src.format,
1456*4882a593Smuzhiyun 1);
1457*4882a593Smuzhiyun #endif
1458*4882a593Smuzhiyun }
1459*4882a593Smuzhiyun if (ret < 0) {
1460*4882a593Smuzhiyun pr_err("rga2 map src0 memory failed\n");
1461*4882a593Smuzhiyun status = ret;
1462*4882a593Smuzhiyun break;
1463*4882a593Smuzhiyun }
1464*4882a593Smuzhiyun
1465*4882a593Smuzhiyun /* change the buf address in req struct */
1466*4882a593Smuzhiyun req->mmu_info.els_base_addr = (((unsigned long)MMU_Base_phys));
1467*4882a593Smuzhiyun /*
1468*4882a593Smuzhiyun  * The color palette mode will not have YUV format as input,
1469*4882a593Smuzhiyun  * so the UV component address is not needed.
1470*4882a593Smuzhiyun  */
1471*4882a593Smuzhiyun req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
1472*4882a593Smuzhiyun }
1473*4882a593Smuzhiyun
1474*4882a593Smuzhiyun if(DstMemSize) {
1475*4882a593Smuzhiyun dma_buffer = &reg->dma_buffer_dst;
1476*4882a593Smuzhiyun
1477*4882a593Smuzhiyun if (dma_buffer->sgt) {
1478*4882a593Smuzhiyun ret = rga2_MapION(dma_buffer->sgt,
1479*4882a593Smuzhiyun MMU_Base + SrcMemSize, DstMemSize);
1480*4882a593Smuzhiyun } else {
1481*4882a593Smuzhiyun ret = rga2_MapUserMemory(&pages[0], MMU_Base + SrcMemSize,
1482*4882a593Smuzhiyun DstStart, DstPageCount, 1, MMU_MAP_INVALID);
1483*4882a593Smuzhiyun #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
1484*4882a593Smuzhiyun if (RGA2_CHECK_MODE)
1485*4882a593Smuzhiyun rga2_user_memory_check(&pages[0], req->dst.vir_w,
1486*4882a593Smuzhiyun req->dst.vir_h, req->dst.format,
1487*4882a593Smuzhiyun 1);
1488*4882a593Smuzhiyun #endif
1489*4882a593Smuzhiyun }
1490*4882a593Smuzhiyun if (ret < 0) {
1491*4882a593Smuzhiyun pr_err("rga2 map dst memory failed\n");
1492*4882a593Smuzhiyun status = ret;
1493*4882a593Smuzhiyun break;
1494*4882a593Smuzhiyun }
1495*4882a593Smuzhiyun /* change the buf address in req struct */
1496*4882a593Smuzhiyun req->mmu_info.dst_base_addr = ((unsigned long)(MMU_Base_phys + SrcMemSize));
1497*4882a593Smuzhiyun req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));
1498*4882a593Smuzhiyun
1499*4882a593Smuzhiyun uv_size = (req->dst.uv_addr
1500*4882a593Smuzhiyun - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1501*4882a593Smuzhiyun v_size = (req->dst.v_addr
1502*4882a593Smuzhiyun - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1503*4882a593Smuzhiyun req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) |
1504*4882a593Smuzhiyun ((uv_size) << PAGE_SHIFT);
1505*4882a593Smuzhiyun req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) |
1506*4882a593Smuzhiyun ((v_size) << PAGE_SHIFT);
1507*4882a593Smuzhiyun }
1508*4882a593Smuzhiyun
1509*4882a593Smuzhiyun /* flush data to DDR */
1510*4882a593Smuzhiyun rga2_dma_flush_range(MMU_Base, (MMU_Base + AllSize));
1511*4882a593Smuzhiyun rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);
1512*4882a593Smuzhiyun reg->MMU_len = AllSize;
1513*4882a593Smuzhiyun
1514*4882a593Smuzhiyun return 0;
1515*4882a593Smuzhiyun }
1516*4882a593Smuzhiyun while(0);
1517*4882a593Smuzhiyun
1518*4882a593Smuzhiyun return status;
1519*4882a593Smuzhiyun }
1520*4882a593Smuzhiyun
1521*4882a593Smuzhiyun static int rga2_mmu_info_color_fill_mode(struct rga2_reg *reg, struct rga2_req *req)
1522*4882a593Smuzhiyun {
1523*4882a593Smuzhiyun int DstMemSize;
1524*4882a593Smuzhiyun unsigned long DstStart;
1525*4882a593Smuzhiyun unsigned long DstPageCount;
1526*4882a593Smuzhiyun struct page **pages = NULL;
1527*4882a593Smuzhiyun uint32_t uv_size, v_size;
1528*4882a593Smuzhiyun uint32_t AllSize;
1529*4882a593Smuzhiyun uint32_t *MMU_Base, *MMU_Base_phys;
1530*4882a593Smuzhiyun int ret;
1531*4882a593Smuzhiyun int status;
1532*4882a593Smuzhiyun struct rga_dma_buffer_t *dma_buffer = NULL;
1533*4882a593Smuzhiyun
1534*4882a593Smuzhiyun DstMemSize = 0;
1535*4882a593Smuzhiyun DstPageCount = 0;
1536*4882a593Smuzhiyun DstStart = 0;
1537*4882a593Smuzhiyun MMU_Base = NULL;
1538*4882a593Smuzhiyun
1539*4882a593Smuzhiyun do {
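/*
 * Color fill only writes to the destination, so only a dst page table
 * is built; it is mapped with MMU_MAP_INVALID (DMA_FROM_DEVICE in
 * rga2_dma_flush_page), so CPU caches are invalidated rather than
 * cleaned.
 */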
1540*4882a593Smuzhiyun if(req->mmu_info.dst_mmu_flag & 1) {
1541*4882a593Smuzhiyun DstPageCount = rga2_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
1542*4882a593Smuzhiyun req->dst.format, req->dst.vir_w, req->dst.vir_h,
1543*4882a593Smuzhiyun &DstStart);
1544*4882a593Smuzhiyun if(DstPageCount == 0) {
1545*4882a593Smuzhiyun return -EINVAL;
1546*4882a593Smuzhiyun }
1547*4882a593Smuzhiyun }
1548*4882a593Smuzhiyun
1549*4882a593Smuzhiyun DstMemSize = (DstPageCount + 15) & (~15);
1550*4882a593Smuzhiyun AllSize = DstMemSize;
1551*4882a593Smuzhiyun
1552*4882a593Smuzhiyun if(rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {
1553*4882a593Smuzhiyun pr_err("RGA2 Get MMU mem failed\n");
1554*4882a593Smuzhiyun status = RGA2_MALLOC_ERROR;
1555*4882a593Smuzhiyun break;
1556*4882a593Smuzhiyun }
1557*4882a593Smuzhiyun
1558*4882a593Smuzhiyun pages = rga2_mmu_buf.pages;
1559*4882a593Smuzhiyun if(pages == NULL) {
1560*4882a593Smuzhiyun pr_err("RGA MMU malloc pages mem failed\n");
1561*4882a593Smuzhiyun return -EINVAL;
1562*4882a593Smuzhiyun }
1563*4882a593Smuzhiyun
1564*4882a593Smuzhiyun mutex_lock(&rga2_service.lock);
1565*4882a593Smuzhiyun MMU_Base_phys = rga2_mmu_buf.buf + rga2_mmu_buf.front;
1566*4882a593Smuzhiyun MMU_Base = rga2_mmu_buf.buf_virtual + rga2_mmu_buf.front;
1567*4882a593Smuzhiyun mutex_unlock(&rga2_service.lock);
1568*4882a593Smuzhiyun
1569*4882a593Smuzhiyun if (DstMemSize) {
1570*4882a593Smuzhiyun dma_buffer = &reg->dma_buffer_dst;
1571*4882a593Smuzhiyun
1572*4882a593Smuzhiyun if (dma_buffer->sgt) {
1573*4882a593Smuzhiyun ret = rga2_MapION(dma_buffer->sgt, &MMU_Base[0], DstMemSize);
1574*4882a593Smuzhiyun }
1575*4882a593Smuzhiyun else {
1576*4882a593Smuzhiyun ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0],
1577*4882a593Smuzhiyun DstStart, DstPageCount,
1578*4882a593Smuzhiyun 1, MMU_MAP_INVALID);
1579*4882a593Smuzhiyun }
1580*4882a593Smuzhiyun if (ret < 0) {
1581*4882a593Smuzhiyun pr_err("rga2 map dst memory failed\n");
1582*4882a593Smuzhiyun status = ret;
1583*4882a593Smuzhiyun break;
1584*4882a593Smuzhiyun }
1585*4882a593Smuzhiyun
1586*4882a593Smuzhiyun /* change the buf address in req struct */
1587*4882a593Smuzhiyun req->mmu_info.dst_base_addr = ((unsigned long)MMU_Base_phys);
1588*4882a593Smuzhiyun req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));
1589*4882a593Smuzhiyun
1590*4882a593Smuzhiyun uv_size = (req->dst.uv_addr
1591*4882a593Smuzhiyun - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1592*4882a593Smuzhiyun v_size = (req->dst.v_addr
1593*4882a593Smuzhiyun - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1594*4882a593Smuzhiyun req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) |
1595*4882a593Smuzhiyun ((uv_size) << PAGE_SHIFT);
1596*4882a593Smuzhiyun req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) |
1597*4882a593Smuzhiyun ((v_size) << PAGE_SHIFT);
1598*4882a593Smuzhiyun }
1599*4882a593Smuzhiyun
1600*4882a593Smuzhiyun /* flush data to DDR */
1601*4882a593Smuzhiyun rga2_dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
1602*4882a593Smuzhiyun rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);
1603*4882a593Smuzhiyun reg->MMU_len = AllSize;
1604*4882a593Smuzhiyun
1605*4882a593Smuzhiyun return 0;
1606*4882a593Smuzhiyun }
1607*4882a593Smuzhiyun while(0);
1608*4882a593Smuzhiyun
1609*4882a593Smuzhiyun return status;
1610*4882a593Smuzhiyun }
1611*4882a593Smuzhiyun
1612*4882a593Smuzhiyun
1613*4882a593Smuzhiyun static int rga2_mmu_info_update_palette_table_mode(struct rga2_reg *reg, struct rga2_req *req)
1614*4882a593Smuzhiyun {
1615*4882a593Smuzhiyun int LutMemSize;
1616*4882a593Smuzhiyun unsigned long LutStart;
1617*4882a593Smuzhiyun unsigned long LutPageCount;
1618*4882a593Smuzhiyun struct page **pages = NULL;
1619*4882a593Smuzhiyun uint32_t uv_size, v_size;
1620*4882a593Smuzhiyun uint32_t AllSize;
1621*4882a593Smuzhiyun uint32_t *MMU_Base, *MMU_Base_phys;
1622*4882a593Smuzhiyun int ret, status;
1623*4882a593Smuzhiyun struct rga_dma_buffer_t *dma_buffer = NULL;
1624*4882a593Smuzhiyun
1625*4882a593Smuzhiyun MMU_Base = NULL;
1626*4882a593Smuzhiyun LutPageCount = 0;
1627*4882a593Smuzhiyun LutMemSize = 0;
1628*4882a593Smuzhiyun LutStart = 0;
1629*4882a593Smuzhiyun
1630*4882a593Smuzhiyun do {
1631*4882a593Smuzhiyun /* calculate LUT buffer MMU info */
1632*4882a593Smuzhiyun if (req->mmu_info.els_mmu_flag & 1) {
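/*
 * Only the palette/LUT table is set up here; the other channels'
 * MMU flags are cleared, presumably so the hardware does not try to
 * translate them through a table this function never builds.
 */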
1633*4882a593Smuzhiyun req->mmu_info.src0_mmu_flag = req->mmu_info.src0_mmu_flag == 1 ? 0 : req->mmu_info.src0_mmu_flag;
1634*4882a593Smuzhiyun req->mmu_info.src1_mmu_flag = req->mmu_info.src1_mmu_flag == 1 ? 0 : req->mmu_info.src1_mmu_flag;
1635*4882a593Smuzhiyun req->mmu_info.dst_mmu_flag = req->mmu_info.dst_mmu_flag == 1 ? 0 : req->mmu_info.dst_mmu_flag;
1636*4882a593Smuzhiyun
1637*4882a593Smuzhiyun LutPageCount = rga2_buf_size_cal(req->pat.yrgb_addr, req->pat.uv_addr, req->pat.v_addr,
1638*4882a593Smuzhiyun req->pat.format, req->pat.vir_w, req->pat.vir_h,
1639*4882a593Smuzhiyun &LutStart);
1640*4882a593Smuzhiyun if(LutPageCount == 0) {
1641*4882a593Smuzhiyun return -EINVAL;
1642*4882a593Smuzhiyun }
1643*4882a593Smuzhiyun }
1644*4882a593Smuzhiyun
1645*4882a593Smuzhiyun LutMemSize = (LutPageCount + 15) & (~15);
1646*4882a593Smuzhiyun AllSize = LutMemSize;
1647*4882a593Smuzhiyun
1648*4882a593Smuzhiyun if (rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {
1649*4882a593Smuzhiyun pr_err("RGA2 Get MMU mem failed\n");
1650*4882a593Smuzhiyun status = RGA2_MALLOC_ERROR;
1651*4882a593Smuzhiyun break;
1652*4882a593Smuzhiyun }
1653*4882a593Smuzhiyun
1654*4882a593Smuzhiyun pages = rga2_mmu_buf.pages;
1655*4882a593Smuzhiyun if (pages == NULL) {
1656*4882a593Smuzhiyun pr_err("RGA MMU malloc pages mem failed\n");
1657*4882a593Smuzhiyun return -EINVAL;
1658*4882a593Smuzhiyun }
1659*4882a593Smuzhiyun
1660*4882a593Smuzhiyun mutex_lock(&rga2_service.lock);
1661*4882a593Smuzhiyun MMU_Base = rga2_mmu_buf.buf_virtual + rga2_mmu_buf.front;
1662*4882a593Smuzhiyun MMU_Base_phys = rga2_mmu_buf.buf + rga2_mmu_buf.front;
1663*4882a593Smuzhiyun mutex_unlock(&rga2_service.lock);
1664*4882a593Smuzhiyun
1665*4882a593Smuzhiyun if (LutMemSize) {
1666*4882a593Smuzhiyun dma_buffer = &reg->dma_buffer_els;
1667*4882a593Smuzhiyun
1668*4882a593Smuzhiyun if (dma_buffer->sgt) {
1669*4882a593Smuzhiyun ret = rga2_MapION(dma_buffer->sgt,
1670*4882a593Smuzhiyun &MMU_Base[0], LutMemSize);
1671*4882a593Smuzhiyun } else {
1672*4882a593Smuzhiyun ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0],
1673*4882a593Smuzhiyun LutStart, LutPageCount, 0, MMU_MAP_CLEAN);
1674*4882a593Smuzhiyun }
1675*4882a593Smuzhiyun if (ret < 0) {
1676*4882a593Smuzhiyun pr_err("rga2 map palette memory failed\n");
1677*4882a593Smuzhiyun status = ret;
1678*4882a593Smuzhiyun break;
1679*4882a593Smuzhiyun }
1680*4882a593Smuzhiyun
1681*4882a593Smuzhiyun /* change the buf address in req struct */
1682*4882a593Smuzhiyun req->mmu_info.els_base_addr = (((unsigned long)MMU_Base_phys));
1683*4882a593Smuzhiyun
1684*4882a593Smuzhiyun req->pat.yrgb_addr = (req->pat.yrgb_addr & (~PAGE_MASK));
1685*4882a593Smuzhiyun
1686*4882a593Smuzhiyun uv_size = (req->pat.uv_addr
1687*4882a593Smuzhiyun - (LutStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1688*4882a593Smuzhiyun v_size = (req->pat.v_addr
1689*4882a593Smuzhiyun - (LutStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1690*4882a593Smuzhiyun req->pat.uv_addr = (req->pat.uv_addr & (~PAGE_MASK)) |
1691*4882a593Smuzhiyun ((uv_size) << PAGE_SHIFT);
1692*4882a593Smuzhiyun req->pat.v_addr = (req->pat.v_addr & (~PAGE_MASK)) |
1693*4882a593Smuzhiyun ((v_size) << PAGE_SHIFT);
1694*4882a593Smuzhiyun }
1695*4882a593Smuzhiyun
1696*4882a593Smuzhiyun /* flush data to DDR */
1697*4882a593Smuzhiyun rga2_dma_flush_range(MMU_Base, (MMU_Base + AllSize));
1698*4882a593Smuzhiyun rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);
1699*4882a593Smuzhiyun reg->MMU_len = AllSize;
1700*4882a593Smuzhiyun
1701*4882a593Smuzhiyun return 0;
1702*4882a593Smuzhiyun }
1703*4882a593Smuzhiyun while(0);
1704*4882a593Smuzhiyun
1705*4882a593Smuzhiyun return status;
1706*4882a593Smuzhiyun }
1707*4882a593Smuzhiyun
1708*4882a593Smuzhiyun /*
1709*4882a593Smuzhiyun  * yqw:
1710*4882a593Smuzhiyun  * It is currently unclear whether rga2 ever uses this function.
1711*4882a593Smuzhiyun  * Because cache invalidate/clean already occupies the parameter
1712*4882a593Smuzhiyun  * reg->MMU_base, disable this function for now and re-implement
1713*4882a593Smuzhiyun  * it if it turns out to be needed.
1714*4882a593Smuzhiyun  */
1715*4882a593Smuzhiyun #if 0
1716*4882a593Smuzhiyun static int rga2_mmu_info_update_patten_buff_mode(struct rga2_reg *reg, struct rga2_req *req)
1717*4882a593Smuzhiyun {
1718*4882a593Smuzhiyun int SrcMemSize, CMDMemSize;
1719*4882a593Smuzhiyun unsigned long SrcStart, CMDStart;
1720*4882a593Smuzhiyun struct page **pages = NULL;
1721*4882a593Smuzhiyun uint32_t i;
1722*4882a593Smuzhiyun uint32_t AllSize;
1723*4882a593Smuzhiyun uint32_t *MMU_Base, *MMU_p;
1724*4882a593Smuzhiyun int ret, status;
1725*4882a593Smuzhiyun
1726*4882a593Smuzhiyun MMU_Base = MMU_p = 0;
1727*4882a593Smuzhiyun
1728*4882a593Smuzhiyun do {
1729*4882a593Smuzhiyun /* cal src buf mmu info */
1730*4882a593Smuzhiyun SrcMemSize = rga2_mem_size_cal(req->pat.yrgb_addr, req->pat.act_w * req->pat.act_h * 4, &SrcStart);
1731*4882a593Smuzhiyun if(SrcMemSize == 0) {
1732*4882a593Smuzhiyun return -EINVAL;
1733*4882a593Smuzhiyun }
1734*4882a593Smuzhiyun
1735*4882a593Smuzhiyun /* cal cmd buf mmu info */
1736*4882a593Smuzhiyun CMDMemSize = rga2_mem_size_cal((unsigned long)rga2_service.cmd_buff, RGA2_CMD_BUF_SIZE, &CMDStart);
1737*4882a593Smuzhiyun if(CMDMemSize == 0) {
1738*4882a593Smuzhiyun return -EINVAL;
1739*4882a593Smuzhiyun }
1740*4882a593Smuzhiyun
1741*4882a593Smuzhiyun AllSize = SrcMemSize + CMDMemSize;
1742*4882a593Smuzhiyun
1743*4882a593Smuzhiyun pages = rga2_mmu_buf.pages;
1744*4882a593Smuzhiyun
1745*4882a593Smuzhiyun MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
1746*4882a593Smuzhiyun if (MMU_Base == NULL)
1747*4882a593Smuzhiyun return -EINVAL;
1748*4882a593Smuzhiyun
1749*4882a593Smuzhiyun for(i=0; i<CMDMemSize; i++) {
1750*4882a593Smuzhiyun MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
1751*4882a593Smuzhiyun }
1752*4882a593Smuzhiyun
1753*4882a593Smuzhiyun if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
1754*4882a593Smuzhiyun {
1755*4882a593Smuzhiyun ret = rga2_MapUserMemory(&pages[CMDMemSize],
1756*4882a593Smuzhiyun &MMU_Base[CMDMemSize],
1757*4882a593Smuzhiyun SrcStart, SrcMemSize,
1758*4882a593Smuzhiyun 1, MMU_MAP_CLEAN);
1759*4882a593Smuzhiyun if (ret < 0) {
1760*4882a593Smuzhiyun pr_err("rga map src memory failed\n");
1761*4882a593Smuzhiyun status = ret;
1762*4882a593Smuzhiyun break;
1763*4882a593Smuzhiyun }
1764*4882a593Smuzhiyun }
1765*4882a593Smuzhiyun else
1766*4882a593Smuzhiyun {
1767*4882a593Smuzhiyun MMU_p = MMU_Base + CMDMemSize;
1768*4882a593Smuzhiyun
1769*4882a593Smuzhiyun for(i=0; i<SrcMemSize; i++)
1770*4882a593Smuzhiyun {
1771*4882a593Smuzhiyun MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
1772*4882a593Smuzhiyun }
1773*4882a593Smuzhiyun }
1774*4882a593Smuzhiyun
1775*4882a593Smuzhiyun /* zsq
1776*4882a593Smuzhiyun  * change the buf address in req struct so that the address the
1777*4882a593Smuzhiyun  * MMU sees becomes an offset into the new page table
1778*4882a593Smuzhiyun  */
1779*4882a593Smuzhiyun req->mmu_info.src0_base_addr = (virt_to_phys(MMU_Base) >> 2);
1780*4882a593Smuzhiyun
1781*4882a593Smuzhiyun req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
1782*4882a593Smuzhiyun
1783*4882a593Smuzhiyun /*record the malloc buf for the cmd end to release*/
1784*4882a593Smuzhiyun reg->MMU_base = MMU_Base;
1785*4882a593Smuzhiyun
1786*4882a593Smuzhiyun /* flush data to DDR */
1787*4882a593Smuzhiyun rga2_dma_flush_range(MMU_Base, (MMU_Base + AllSize));
1788*4882a593Smuzhiyun return 0;
1789*4882a593Smuzhiyun
1790*4882a593Smuzhiyun }
1791*4882a593Smuzhiyun while(0);
1792*4882a593Smuzhiyun
1793*4882a593Smuzhiyun return status;
1794*4882a593Smuzhiyun }
1795*4882a593Smuzhiyun #endif
1796*4882a593Smuzhiyun
1797*4882a593Smuzhiyun int rga2_set_mmu_info(struct rga2_reg *reg, struct rga2_req *req)
1798*4882a593Smuzhiyun {
1799*4882a593Smuzhiyun int ret;
1800*4882a593Smuzhiyun
1801*4882a593Smuzhiyun if (reg->MMU_map == true) {
1802*4882a593Smuzhiyun ret = rga2_mmu_flush_cache(reg, req);
1803*4882a593Smuzhiyun return ret;
1804*4882a593Smuzhiyun }
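/*
 * Otherwise build the page tables for the requested render mode; each
 * helper returns 0 on success or a nonzero error code on failure.
 */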
1805*4882a593Smuzhiyun
1806*4882a593Smuzhiyun switch (req->render_mode) {
1807*4882a593Smuzhiyun case bitblt_mode :
1808*4882a593Smuzhiyun ret = rga2_mmu_info_BitBlt_mode(reg, req);
1809*4882a593Smuzhiyun break;
1810*4882a593Smuzhiyun case color_palette_mode :
1811*4882a593Smuzhiyun ret = rga2_mmu_info_color_palette_mode(reg, req);
1812*4882a593Smuzhiyun break;
1813*4882a593Smuzhiyun case color_fill_mode :
1814*4882a593Smuzhiyun ret = rga2_mmu_info_color_fill_mode(reg, req);
1815*4882a593Smuzhiyun break;
1816*4882a593Smuzhiyun case update_palette_table_mode :
1817*4882a593Smuzhiyun ret = rga2_mmu_info_update_palette_table_mode(reg, req);
1818*4882a593Smuzhiyun break;
1819*4882a593Smuzhiyun #if 0
1820*4882a593Smuzhiyun case update_patten_buff_mode :
1821*4882a593Smuzhiyun ret = rga2_mmu_info_update_patten_buff_mode(reg, req);
1822*4882a593Smuzhiyun break;
1823*4882a593Smuzhiyun #endif
1824*4882a593Smuzhiyun default :
1825*4882a593Smuzhiyun ret = -1;
1826*4882a593Smuzhiyun break;
1827*4882a593Smuzhiyun }
1828*4882a593Smuzhiyun
1829*4882a593Smuzhiyun return ret;
1830*4882a593Smuzhiyun }
1831*4882a593Smuzhiyun