1 /* SPDX-License-Identifier: GPL-2.0 */
2
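/*
 * RGA2 MMU helpers: import dma-buf/ION buffers, pin user pages and build
 * the page tables the RGA2 hardware MMU uses for the src0, src1, dst and
 * els channels of a job.
 */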
3 #define pr_fmt(fmt) "rga2_mmu: " fmt
4 #include <linux/version.h>
5 #include <linux/init.h>
6 #include <linux/module.h>
7 #include <linux/fs.h>
8 #include <linux/sched.h>
9 #include <linux/signal.h>
10 #include <linux/pagemap.h>
11 #include <linux/seq_file.h>
12 #include <linux/mm.h>
13 #include <linux/mman.h>
14 #include <linux/sched.h>
15 #include <linux/slab.h>
16 #include <linux/memory.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/scatterlist.h>
19 #include <asm/memory.h>
20 #include <asm/atomic.h>
21 #include <asm/cacheflush.h>
22 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
23 #include <linux/rockchip_ion.h>
24 #endif
25 #include "rga2_mmu_info.h"
26 #include "rga2_debugger.h"
27
28 extern struct rga2_service_info rga2_service;
29 extern struct rga2_mmu_buf_t rga2_mmu_buf;
30 extern struct rga2_drvdata_t *rga2_drvdata;
31
32 //extern int mmu_buff_temp[1024];
33
34 #define KERNEL_SPACE_VALID 0xc0000000
35
36 #define V7_VATOPA_SUCESS_MASK (0x1)
37 #define V7_VATOPA_GET_PADDR(X) (X & 0xFFFFF000)
38 #define V7_VATOPA_GET_INER(X) ((X>>4) & 7)
39 #define V7_VATOPA_GET_OUTER(X) ((X>>2) & 3)
40 #define V7_VATOPA_GET_SH(X) ((X>>7) & 1)
41 #define V7_VATOPA_GET_NS(X) ((X>>9) & 1)
42 #define V7_VATOPA_GET_SS(X) ((X>>1) & 1)
43
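/*
 * Flush the CPU cache for the kernel virtual range [pstart, pend) towards
 * the device, so that page tables written by the CPU are visible to RGA2.
 */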
44 void rga2_dma_flush_range(void *pstart, void *pend)
45 {
46 dma_sync_single_for_device(rga2_drvdata->dev, virt_to_phys(pstart), pend - pstart, DMA_TO_DEVICE);
47 }
48
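/*
 * Map or unmap a single page for DMA according to the MMU_MAP_x /
 * MMU_UNMAP_x flags in 'map', and return its DMA (or physical) address.
 */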
49 dma_addr_t rga2_dma_flush_page(struct page *page, int map)
50 {
51 dma_addr_t paddr;
52
53 /*
54 * Use dma_map_page() to ensure that the physical address does not
55 * exceed the addressing range of the DMA device.
56 */
57 if (map & MMU_MAP_MASK) {
58 switch (map) {
59 case MMU_MAP_CLEAN:
60 paddr = dma_map_page(rga2_drvdata->dev, page, 0,
61 PAGE_SIZE, DMA_TO_DEVICE);
62 break;
63 case MMU_MAP_INVALID:
64 paddr = dma_map_page(rga2_drvdata->dev, page, 0,
65 PAGE_SIZE, DMA_FROM_DEVICE);
66 break;
67 case MMU_MAP_CLEAN | MMU_MAP_INVALID:
68 paddr = dma_map_page(rga2_drvdata->dev, page, 0,
69 PAGE_SIZE, DMA_BIDIRECTIONAL);
70 break;
71 default:
72 paddr = 0;
73 pr_err("unknown map cmd 0x%x\n", map);
74 break;
75 }
76
77 return paddr;
78 } else if (map & MMU_UNMAP_MASK) {
79 paddr = page_to_phys(page);
80
81 switch (map) {
82 case MMU_UNMAP_CLEAN:
83 dma_unmap_page(rga2_drvdata->dev, paddr,
84 PAGE_SIZE, DMA_TO_DEVICE);
85 break;
86 case MMU_UNMAP_INVALID:
87 dma_unmap_page(rga2_drvdata->dev, paddr,
88 PAGE_SIZE, DMA_FROM_DEVICE);
89 break;
90 case MMU_UNMAP_CLEAN | MMU_UNMAP_INVALID:
91 dma_unmap_page(rga2_drvdata->dev, paddr,
92 PAGE_SIZE, DMA_BIDIRECTIONAL);
93 break;
94 default:
95 pr_err("unknown map cmd 0x%x\n", map);
96 break;
97 }
98
99 return paddr;
100 }
101
102 pr_err("RGA2 failed to flush page, map= %x\n", map);
103 return 0;
104 }
105
106 #if 0
107 static unsigned int armv7_va_to_pa(unsigned int v_addr)
108 {
109 unsigned int p_addr;
110 __asm__ volatile ( "mcr p15, 0, %1, c7, c8, 0\n"
111 "isb\n"
112 "dsb\n"
113 "mrc p15, 0, %0, c7, c4, 0\n"
114 : "=r" (p_addr)
115 : "r" (v_addr)
116 : "cc");
117
118 if (p_addr & V7_VATOPA_SUCESS_MASK)
119 return 0xFFFFFFFF;
120 else
121 return (V7_VATOPA_GET_SS(p_addr) ? 0xFFFFFFFF : V7_VATOPA_GET_PADDR(p_addr));
122 }
123 #endif
124
125 static bool rga2_is_yuv422p_format(u32 format)
126 {
127 bool ret = false;
128
129 switch (format) {
130 case RGA2_FORMAT_YCbCr_422_P:
131 case RGA2_FORMAT_YCrCb_422_P:
132 ret = true;
133 break;
134 }
135 return ret;
136 }
137
138 #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
139 static int rga2_get_format_bits(u32 format)
140 {
141 int bits = 0;
142
143 switch (format) {
144 case RGA2_FORMAT_RGBA_8888:
145 case RGA2_FORMAT_RGBX_8888:
146 case RGA2_FORMAT_BGRA_8888:
147 case RGA2_FORMAT_BGRX_8888:
148 case RGA2_FORMAT_ARGB_8888:
149 case RGA2_FORMAT_XRGB_8888:
150 case RGA2_FORMAT_ABGR_8888:
151 case RGA2_FORMAT_XBGR_8888:
152 bits = 32;
153 break;
154 case RGA2_FORMAT_RGB_888:
155 case RGA2_FORMAT_BGR_888:
156 bits = 24;
157 break;
158 case RGA2_FORMAT_RGB_565:
159 case RGA2_FORMAT_RGBA_5551:
160 case RGA2_FORMAT_RGBA_4444:
161 case RGA2_FORMAT_BGR_565:
162 case RGA2_FORMAT_YCbCr_422_SP:
163 case RGA2_FORMAT_YCbCr_422_P:
164 case RGA2_FORMAT_YCrCb_422_SP:
165 case RGA2_FORMAT_YCrCb_422_P:
166 case RGA2_FORMAT_BGRA_5551:
167 case RGA2_FORMAT_BGRA_4444:
168 case RGA2_FORMAT_ARGB_5551:
169 case RGA2_FORMAT_ARGB_4444:
170 case RGA2_FORMAT_ABGR_5551:
171 case RGA2_FORMAT_ABGR_4444:
172 bits = 16;
173 break;
174 case RGA2_FORMAT_YCbCr_420_SP:
175 case RGA2_FORMAT_YCbCr_420_P:
176 case RGA2_FORMAT_YCrCb_420_SP:
177 case RGA2_FORMAT_YCrCb_420_P:
178 bits = 12;
179 break;
180 case RGA2_FORMAT_YCbCr_420_SP_10B:
181 case RGA2_FORMAT_YCrCb_420_SP_10B:
182 case RGA2_FORMAT_YCbCr_422_SP_10B:
183 case RGA2_FORMAT_YCrCb_422_SP_10B:
184 bits = 15;
185 break;
186 default:
187 pr_err("unknown format [%d]\n", format);
188 return -1;
189 }
190
191 return bits;
192 }
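
/*
 * Debug helper: kmap the last page of a pinned user buffer with the given
 * geometry and print its tail word, to verify the buffer is fully mapped.
 */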
193 static int rga2_user_memory_check(struct page **pages, u32 w, u32 h, u32 format, int flag)
194 {
195 int bits;
196 void *vaddr = NULL;
197 int taipage_num;
198 int taidata_num;
199 int *tai_vaddr = NULL;
200
201 bits = rga2_get_format_bits(format);
202 if (bits < 0)
203 return -1;
204
205 taipage_num = w * h * bits / 8 / (1024 * 4);
206 taidata_num = w * h * bits / 8 % (1024 * 4);
207 if (taidata_num == 0) {
208 vaddr = kmap(pages[taipage_num - 1]);
209 tai_vaddr = (int *)vaddr + 1023;
210 } else {
211 vaddr = kmap(pages[taipage_num]);
212 tai_vaddr = (int *)vaddr + taidata_num / 4 - 1;
213 }
214
215 if (flag == 1) {
216 pr_info("src user memory check\n");
217 pr_info("tai data is %d\n", *tai_vaddr);
218 } else {
219 pr_info("dst user memory check\n");
220 pr_info("tai data is %d\n", *tai_vaddr);
221 }
222
223 if (taidata_num == 0)
224 kunmap(pages[taipage_num - 1]);
225 else
226 kunmap(pages[taipage_num]);
227
228 return 0;
229 }
230
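/*
 * Debug helper: touch the last line of a buffer through its kernel virtual
 * address (read it out when an fd is given, otherwise overwrite it with
 * zeroes) to confirm the mapping covers the whole image.
 */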
231 static int rga2_virtual_memory_check(void *vaddr, u32 w, u32 h, u32 format, int fd)
232 {
233 int bits = 32;
234 int temp_data = 0;
235 void *one_line = NULL;
236
237 bits = rga2_get_format_bits(format);
238 if (bits < 0)
239 return -1;
240
241 one_line = kzalloc(w * 4, GFP_KERNEL);
242 if (!one_line) {
243 ERR("kzalloc fail %s[%d]\n", __func__, __LINE__);
244 return 0;
245 }
246
247 temp_data = w * (h - 1) * bits >> 3;
248 if (fd > 0) {
249 INFO("vaddr is %p, bits is %d, fd check\n", vaddr, bits);
250 memcpy(one_line, (char *)vaddr + temp_data, w * bits >> 3);
251 INFO("fd check ok\n");
252 } else {
253 INFO("vir addr memory check.\n");
254 memcpy((void *)((char *)vaddr + temp_data), one_line,
255 w * bits >> 3);
256 INFO("vir addr check ok.\n");
257 }
258
259 kfree(one_line);
260 return 0;
261 }
262
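/* Debug helper: vmap an imported dma-buf and run rga2_virtual_memory_check() on it. */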
263 static int rga2_dma_memory_check(struct rga_dma_buffer_t *buffer,
264 struct rga_img_info_t *img)
265 {
266 int ret = 0;
267 void *vaddr;
268 struct dma_buf *dma_buffer;
269
270 dma_buffer = buffer->dma_buf;
271
272 if (!IS_ERR_OR_NULL(dma_buffer)) {
273 vaddr = dma_buf_vmap(dma_buffer);
274 if (vaddr) {
275 ret = rga2_virtual_memory_check(vaddr, img->vir_w, img->vir_h,
276 img->format, img->yrgb_addr);
277 } else {
278 pr_err("can't vmap the dma buffer!\n");
279 return -EINVAL;
280 }
281
282 dma_buf_vunmap(dma_buffer, vaddr);
283 }
284
285 return ret;
286 }
287 #endif
288
289 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
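/*
 * Import the dma-buf referenced by 'fd' and map it for the RGA2 device:
 * dma_buf_get() -> dma_buf_attach() -> dma_buf_map_attachment().  The
 * handles are kept in rga_dma_buffer so rga2_unmap_dma_buffer() can
 * release them once the job has finished.
 */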
290 static int rga2_map_dma_buffer(int fd,
291 struct rga_dma_buffer_t *rga_dma_buffer,
292 enum dma_data_direction dir)
293 {
294 struct device *rga_dev = NULL;
295 struct dma_buf *dma_buf = NULL;
296 struct dma_buf_attachment *attach = NULL;
297 struct sg_table *sgt = NULL;
298 int ret = 0;
299
300 rga_dev = rga2_drvdata->dev;
301
302 dma_buf = dma_buf_get(fd);
303 if (IS_ERR(dma_buf)) {
304 ret = -EINVAL;
305 pr_err("dma_buf_get fail fd[%d]\n", fd);
306 return ret;
307 }
308
309 attach = dma_buf_attach(dma_buf, rga_dev);
310 if (IS_ERR(attach)) {
311 ret = -EINVAL;
312 pr_err("Failed to attach dma_buf\n");
313 goto err_get_attach;
314 }
315
316 sgt = dma_buf_map_attachment(attach, dir);
317 if (IS_ERR(sgt)) {
318 ret = -EINVAL;
319 pr_err("Failed to map src attachment\n");
320 goto err_get_sgt;
321 }
322
323 rga_dma_buffer->dma_buf = dma_buf;
324 rga_dma_buffer->attach = attach;
325 rga_dma_buffer->sgt = sgt;
326 rga_dma_buffer->size = sg_dma_len(sgt->sgl);
327 rga_dma_buffer->dir = dir;
328
329 return ret;
330
331 err_get_sgt:
332 if (attach)
333 dma_buf_detach(dma_buf, attach);
334 err_get_attach:
335 if (dma_buf)
336 dma_buf_put(dma_buf);
337
338 return ret;
339 }
340
341 static void rga2_unmap_dma_buffer(struct rga_dma_buffer_t *rga_dma_buffer)
342 {
343 if (rga_dma_buffer->attach && rga_dma_buffer->sgt)
344 dma_buf_unmap_attachment(rga_dma_buffer->attach,
345 rga_dma_buffer->sgt,
346 rga_dma_buffer->dir);
347 if (rga_dma_buffer->attach) {
348 dma_buf_detach(rga_dma_buffer->dma_buf, rga_dma_buffer->attach);
349 dma_buf_put(rga_dma_buffer->dma_buf);
350 }
351 }
352
353 static void rga2_convert_addr(struct rga_img_info_t *img)
354 {
355 /*
356 * If a dma fd is not used, the virtual/physical address is assigned
357 * to the address of the corresponding channel.
358 */
359 img->yrgb_addr = img->uv_addr;
360 img->uv_addr = img->yrgb_addr + (img->vir_w * img->vir_h);
361 if (rga2_is_yuv422p_format(img->format))
362 img->v_addr = img->uv_addr + (img->vir_w * img->vir_h) / 2;
363 else
364 img->v_addr = img->uv_addr + (img->vir_w * img->vir_h) / 4;
365 }
366
367 int rga2_get_dma_info(struct rga2_reg *reg, struct rga2_req *req)
368 {
369 uint32_t mmu_flag;
370 int ret;
371
372 struct rga_dma_buffer_t *buffer_src0, *buffer_src1, *buffer_dst, *buffer_els;
373 struct rga_img_info_t *src0, *src1, *dst, *els;
374
375 /*
376 * Since the lifetime of rga2_req does not cover releasing the dma-buf
377 * after the task is over, the mapped dma-buf is saved in rga2_reg
378 * instead.
379 */
380 buffer_src0 = &reg->dma_buffer_src0;
381 buffer_src1 = &reg->dma_buffer_src1;
382 buffer_dst = &reg->dma_buffer_dst;
383 buffer_els = &reg->dma_buffer_els;
384
385 src0 = &req->src;
386 src1 = &req->src1;
387 dst = &req->dst;
388 els = &req->pat;
389
390 /* src0 channel */
391 mmu_flag = req->mmu_info.src0_mmu_flag;
392 if (unlikely(!mmu_flag && src0->yrgb_addr)) {
393 pr_err("Fix it: please enable src0 mmu\n");
394 return -EINVAL;
395 } else if (mmu_flag && src0->yrgb_addr) {
396 ret = rga2_map_dma_buffer(src0->yrgb_addr, buffer_src0, DMA_BIDIRECTIONAL);
397 if (ret < 0) {
398 pr_err("src0: can't map dma-buf\n");
399 return ret;
400 }
401 }
402 #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
403 if (RGA2_CHECK_MODE) {
404 ret = rga2_dma_memory_check(buffer_src0, src0);
405 if (ret < 0) {
406 pr_err("src0 channel check memory error!\n");
407 return ret;
408 }
409 }
410 #endif
411 rga2_convert_addr(src0);
412
413 /* src1 channel */
414 mmu_flag = req->mmu_info.src1_mmu_flag;
415 if (unlikely(!mmu_flag && src1->yrgb_addr)) {
416 pr_err("Fix it: please enable src1 mmu\n");
417 ret = -EINVAL;
418 goto err_src1_channel;
419 } else if (mmu_flag && src1->yrgb_addr) {
420 ret = rga2_map_dma_buffer(src1->yrgb_addr, buffer_src1, DMA_BIDIRECTIONAL);
421 if (ret < 0) {
422 pr_err("src1: can't map dma-buf\n");
423 goto err_src1_channel;
424 }
425 }
426 #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
427 if (RGA2_CHECK_MODE) {
428 ret = rga2_dma_memory_check(buffer_src1, src1);
429 if (ret < 0) {
430 pr_err("src1 channel check memory error!\n");
431 goto err_src1_channel;
432 }
433 }
434 #endif
435 rga2_convert_addr(src1);
436
437 /* dst channel */
438 mmu_flag = req->mmu_info.dst_mmu_flag;
439 if (unlikely(!mmu_flag && dst->yrgb_addr)) {
440 pr_err("Fix it: please enable dst mmu\n");
441 ret = -EINVAL;
442 goto err_dst_channel;
443 } else if (mmu_flag && dst->yrgb_addr) {
444 ret = rga2_map_dma_buffer(dst->yrgb_addr, buffer_dst, DMA_BIDIRECTIONAL);
445 if (ret < 0) {
446 pr_err("dst: can't map dma-buf\n");
447 goto err_dst_channel;
448 }
449 }
450 #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
451 if (RGA2_CHECK_MODE) {
452 ret = rga2_dma_memory_check(buffer_dst, dst);
453 if (ret < 0) {
454 pr_err("dst channel check memory error!\n");
455 goto err_dst_channel;
456 }
457 }
458 #endif
459 rga2_convert_addr(dst);
460
461 /* els channel */
462 mmu_flag = req->mmu_info.els_mmu_flag;
463 if (unlikely(!mmu_flag && els->yrgb_addr)) {
464 pr_err("Fix it: please enable els mmu\n");
465 ret = -EINVAL;
466 goto err_els_channel;
467 } else if (mmu_flag && els->yrgb_addr) {
468 ret = rga2_map_dma_buffer(els->yrgb_addr, buffer_els, DMA_BIDIRECTIONAL);
469 if (ret < 0) {
470 pr_err("els: can't map dma-buf\n");
471 goto err_els_channel;
472 }
473 }
474 #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
475 if (RGA2_CHECK_MODE) {
476 ret = rga2_dma_memory_check(buffer_els, els);
477 if (ret < 0) {
478 pr_err("els channel check memory error!\n");
479 goto err_els_channel;
480 }
481 }
482 #endif
483 rga2_convert_addr(els);
484
485 return 0;
486
487 err_els_channel:
488 rga2_unmap_dma_buffer(&reg->dma_buffer_dst);
489 err_dst_channel:
490 rga2_unmap_dma_buffer(&reg->dma_buffer_src1);
491 err_src1_channel:
492 rga2_unmap_dma_buffer(&reg->dma_buffer_src0);
493
494 return ret;
495 }
496
497 void rga2_put_dma_info(struct rga2_reg *reg)
498 {
499 rga2_unmap_dma_buffer(&reg->dma_buffer_src0);
500 rga2_unmap_dma_buffer(&reg->dma_buffer_src1);
501 rga2_unmap_dma_buffer(&reg->dma_buffer_dst);
502 rga2_unmap_dma_buffer(&reg->dma_buffer_els);
503 }
504 #else
505 static int rga2_get_dma_info(struct rga2_reg *reg, struct rga2_req *req)
506 {
507 struct ion_handle *hdl;
508 ion_phys_addr_t phy_addr;
509 size_t len;
510 int ret;
511 u32 src_vir_w, dst_vir_w;
512 void *vaddr = NULL;
513 struct rga_dma_buffer_t *buffer_src0, *buffer_src1, *buffer_dst, *buffer_els;
514
515 src_vir_w = req->src.vir_w;
516 dst_vir_w = req->dst.vir_w;
517
518 buffer_src0 = &reg->dma_buffer_src0;
519 buffer_src1 = &reg->dma_buffer_src1;
520 buffer_dst = &reg->dma_buffer_dst;
521 buffer_els = &reg->dma_buffer_els;
522
523 if ((int)req->src.yrgb_addr > 0) {
524 hdl = ion_import_dma_buf(rga2_drvdata->ion_client,
525 req->src.yrgb_addr);
526 if (IS_ERR(hdl)) {
527 ret = PTR_ERR(hdl);
528 pr_err("RGA2 SRC ERROR ion buf handle\n");
529 return ret;
530 }
531 #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
532 if (RGA2_CHECK_MODE) {
533 vaddr = ion_map_kernel(rga2_drvdata->ion_client, hdl);
534 if (vaddr)
535 rga2_memory_check(vaddr, req->src.vir_w, req->src.vir_h,
536 req->src.format, req->src.yrgb_addr);
537 ion_unmap_kernel(rga2_drvdata->ion_client, hdl);
538 }
539 #endif
540 if (req->mmu_info.src0_mmu_flag) {
541 buffer_src0->sgt =
542 ion_sg_table(rga2_drvdata->ion_client, hdl);
543 req->src.yrgb_addr = req->src.uv_addr;
544 req->src.uv_addr =
545 req->src.yrgb_addr + (src_vir_w * req->src.vir_h);
546 req->src.v_addr =
547 req->src.uv_addr + (src_vir_w * req->src.vir_h) / 4;
548 } else {
549 ion_phys(rga2_drvdata->ion_client, hdl, &phy_addr, &len);
550 req->src.yrgb_addr = phy_addr;
551 req->src.uv_addr =
552 req->src.yrgb_addr + (src_vir_w * req->src.vir_h);
553 req->src.v_addr =
554 req->src.uv_addr + (src_vir_w * req->src.vir_h) / 4;
555 }
556 ion_free(rga2_drvdata->ion_client, hdl);
557 } else {
558 req->src.yrgb_addr = req->src.uv_addr;
559 req->src.uv_addr =
560 req->src.yrgb_addr + (src_vir_w * req->src.vir_h);
561 req->src.v_addr =
562 req->src.uv_addr + (src_vir_w * req->src.vir_h) / 4;
563 }
564
565 if ((int)req->dst.yrgb_addr > 0) {
566 hdl = ion_import_dma_buf(rga2_drvdata->ion_client,
567 req->dst.yrgb_addr);
568 if (IS_ERR(hdl)) {
569 ret = PTR_ERR(hdl);
570 pr_err("RGA2 DST ERROR ion buf handle\n");
571 return ret;
572 }
573 #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
574 if (RGA2_CHECK_MODE) {
575 vaddr = ion_map_kernel(rga2_drvdata->ion_client, hdl);
576 if (vaddr)
577 rga2_memory_check(vaddr, req->dst.vir_w, req->dst.vir_h,
578 req->dst.format, req->dst.yrgb_addr);
579 ion_unmap_kernel(rga2_drvdata->ion_client, hdl);
580 }
581 #endif
582 if (req->mmu_info.dst_mmu_flag) {
583 buffer_dst->sgt =
584 ion_sg_table(rga2_drvdata->ion_client, hdl);
585 req->dst.yrgb_addr = req->dst.uv_addr;
586 req->dst.uv_addr =
587 req->dst.yrgb_addr + (dst_vir_w * req->dst.vir_h);
588 req->dst.v_addr =
589 req->dst.uv_addr + (dst_vir_w * req->dst.vir_h) / 4;
590 } else {
591 ion_phys(rga2_drvdata->ion_client, hdl, &phy_addr, &len);
592 req->dst.yrgb_addr = phy_addr;
593 req->dst.uv_addr =
594 req->dst.yrgb_addr + (dst_vir_w * req->dst.vir_h);
595 req->dst.v_addr =
596 req->dst.uv_addr + (dst_vir_w * req->dst.vir_h) / 4;
597 }
598 ion_free(rga2_drvdata->ion_client, hdl);
599 } else {
600 req->dst.yrgb_addr = req->dst.uv_addr;
601 req->dst.uv_addr =
602 req->dst.yrgb_addr + (dst_vir_w * req->dst.vir_h);
603 req->dst.v_addr =
604 req->dst.uv_addr + (dst_vir_w * req->dst.vir_h) / 4;
605 }
606
607 if ((int)req->src1.yrgb_addr > 0) {
608 hdl = ion_import_dma_buf(rga2_drvdata->ion_client,
609 req->src1.yrgb_addr);
610 if (IS_ERR(hdl)) {
611 ret = PTR_ERR(hdl);
612 pr_err("RGA2 ERROR ion buf handle\n");
613 return ret;
614 }
615 if (req->mmu_info.src1_mmu_flag) {
616 buffer_src1->sgt =
617 ion_sg_table(rga2_drvdata->ion_client, hdl);
618 req->src1.yrgb_addr = req->src1.uv_addr;
619 req->src1.uv_addr =
620 req->src1.yrgb_addr + (req->src1.vir_w * req->src1.vir_h);
621 req->src1.v_addr =
622 req->src1.uv_addr + (req->src1.vir_w * req->src1.vir_h) / 4;
623 } else {
624 ion_phys(rga2_drvdata->ion_client, hdl, &phy_addr, &len);
625 req->src1.yrgb_addr = phy_addr;
626 req->src1.uv_addr =
627 req->src1.yrgb_addr + (req->src1.vir_w * req->src1.vir_h);
628 req->src1.v_addr =
629 req->src1.uv_addr + (req->src1.vir_w * req->src1.vir_h) / 4;
630 }
631 ion_free(rga2_drvdata->ion_client, hdl);
632 } else {
633 req->src1.yrgb_addr = req->src1.uv_addr;
634 req->src1.uv_addr =
635 req->src1.yrgb_addr + (req->src1.vir_w * req->src1.vir_h);
636 req->src1.v_addr =
637 req->src1.uv_addr + (req->src1.vir_w * req->src1.vir_h) / 4;
638 }
639 if (rga2_is_yuv422p_format(req->src.format))
640 req->src.v_addr = req->src.uv_addr + (req->src.vir_w * req->src.vir_h) / 2;
641 if (rga2_is_yuv422p_format(req->dst.format))
642 req->dst.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h) / 2;
643 if (rga2_is_yuv422p_format(req->src1.format))
644 req->src1.v_addr = req->src1.uv_addr + (req->src1.vir_w * req->src1.vir_h) / 2;
645
646 return 0;
647 }
648
649 /* When the kernel version is lower than 4.4, no put buffer operation is required. */
650 void rga2_put_dma_info(struct rga2_reg *reg) {}
651 #endif
652
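/* Commit a reservation by advancing 'front' once the page table has been filled. */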
653 static int rga2_mmu_buf_get(struct rga2_mmu_buf_t *t, uint32_t size)
654 {
655 mutex_lock(&rga2_service.lock);
656 t->front += size;
657 mutex_unlock(&rga2_service.lock);
658
659 return 0;
660 }
661
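/*
 * Check whether 'size' entries can be reserved at 'front' of the MMU
 * page-table ring buffer without running into 'back'; when the reservation
 * would run past the end of the buffer, 'front' wraps back to 0.
 * Returns 0 if the reservation fits, -ENOMEM otherwise.
 */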
662 static int rga2_mmu_buf_get_try(struct rga2_mmu_buf_t *t, uint32_t size)
663 {
664 int ret = 0;
665
666 mutex_lock(&rga2_service.lock);
667 if ((t->back - t->front) > t->size) {
668 if (t->front + size > t->back - t->size) {
669 pr_info("front %d, back %d dsize %d size %d",
670 t->front, t->back, t->size, size);
671 ret = -ENOMEM;
672 goto out;
673 }
674 } else {
675 if ((t->front + size) > t->back) {
676 pr_info("front %d, back %d dsize %d size %d",
677 t->front, t->back, t->size, size);
678 ret = -ENOMEM;
679 goto out;
680 }
681
682 if (t->front + size > t->size) {
683 if (size > (t->back - t->size)) {
684 pr_info("front %d, back %d dsize %d size %d",
685 t->front, t->back, t->size, size);
686 ret = -ENOMEM;
687 goto out;
688 }
689 t->front = 0;
690 }
691 }
692 out:
693 mutex_unlock(&rga2_service.lock);
694 return ret;
695 }
696
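/*
 * Return the number of pages spanned by [Mem, Mem + MemSize) and store the
 * first page frame number in *StartAddr.
 */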
697 static int rga2_mem_size_cal(unsigned long Mem, uint32_t MemSize, unsigned long *StartAddr)
698 {
699 unsigned long start, end;
700 uint32_t pageCount;
701
702 end = (Mem + (MemSize + PAGE_SIZE - 1)) >> PAGE_SHIFT;
703 start = Mem >> PAGE_SHIFT;
704 pageCount = end - start;
705 *StartAddr = start;
706 return pageCount;
707 }
708
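/*
 * Work out how many pages a buffer of the given format and size occupies,
 * taking the Y, UV and V plane addresses into account, and store the first
 * page frame number in *StartAddr.  Unknown formats yield a count of 0.
 */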
709 static int rga2_buf_size_cal(unsigned long yrgb_addr, unsigned long uv_addr, unsigned long v_addr,
710 int format, uint32_t w, uint32_t h, unsigned long *StartAddr)
711 {
712 uint32_t size_yrgb = 0;
713 uint32_t size_uv = 0;
714 uint32_t size_v = 0;
715 uint32_t stride = 0;
716 unsigned long start, end;
717 uint32_t pageCount;
718
719 switch(format)
720 {
721 case RGA2_FORMAT_RGBA_8888 :
722 case RGA2_FORMAT_RGBX_8888 :
723 case RGA2_FORMAT_BGRA_8888 :
724 case RGA2_FORMAT_BGRX_8888 :
725 case RGA2_FORMAT_ARGB_8888 :
726 case RGA2_FORMAT_XRGB_8888 :
727 case RGA2_FORMAT_ABGR_8888 :
728 case RGA2_FORMAT_XBGR_8888 :
729 stride = (w * 4 + 3) & (~3);
730 size_yrgb = stride*h;
731 start = yrgb_addr >> PAGE_SHIFT;
732 end = yrgb_addr + size_yrgb;
733 end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
734 pageCount = end - start;
735 break;
736 case RGA2_FORMAT_RGB_888 :
737 case RGA2_FORMAT_BGR_888 :
738 stride = (w * 3 + 3) & (~3);
739 size_yrgb = stride*h;
740 start = yrgb_addr >> PAGE_SHIFT;
741 end = yrgb_addr + size_yrgb;
742 end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
743 pageCount = end - start;
744 break;
745 case RGA2_FORMAT_RGB_565 :
746 case RGA2_FORMAT_RGBA_5551 :
747 case RGA2_FORMAT_RGBA_4444 :
748 case RGA2_FORMAT_BGR_565 :
749 case RGA2_FORMAT_BGRA_5551 :
750 case RGA2_FORMAT_BGRA_4444 :
751 case RGA2_FORMAT_ARGB_5551 :
752 case RGA2_FORMAT_ARGB_4444 :
753 case RGA2_FORMAT_ABGR_5551 :
754 case RGA2_FORMAT_ABGR_4444 :
755 stride = (w*2 + 3) & (~3);
756 size_yrgb = stride * h;
757 start = yrgb_addr >> PAGE_SHIFT;
758 end = yrgb_addr + size_yrgb;
759 end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
760 pageCount = end - start;
761 break;
762
763 /* YUV FORMAT */
764 case RGA2_FORMAT_YCbCr_422_SP :
765 case RGA2_FORMAT_YCrCb_422_SP :
766 stride = (w + 3) & (~3);
767 size_yrgb = stride * h;
768 size_uv = stride * h;
769 start = MIN(yrgb_addr, uv_addr);
770 start >>= PAGE_SHIFT;
771 end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
772 end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
773 pageCount = end - start;
774 break;
775 case RGA2_FORMAT_YCbCr_422_P :
776 case RGA2_FORMAT_YCrCb_422_P :
777 stride = (w + 3) & (~3);
778 size_yrgb = stride * h;
779 size_uv = ((stride >> 1) * h);
780 size_v = ((stride >> 1) * h);
781 start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
782 start = start >> PAGE_SHIFT;
783 end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
784 end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
785 pageCount = end - start;
786 break;
787 case RGA2_FORMAT_YCbCr_420_SP :
788 case RGA2_FORMAT_YCrCb_420_SP :
789 stride = (w + 3) & (~3);
790 size_yrgb = stride * h;
791 size_uv = (stride * (h >> 1));
792 start = MIN(yrgb_addr, uv_addr);
793 start >>= PAGE_SHIFT;
794 end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
795 end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
796 pageCount = end - start;
797 break;
798 case RGA2_FORMAT_YCbCr_420_P :
799 case RGA2_FORMAT_YCrCb_420_P :
800 stride = (w + 3) & (~3);
801 size_yrgb = stride * h;
802 size_uv = ((stride >> 1) * (h >> 1));
803 size_v = ((stride >> 1) * (h >> 1));
804 start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
805 start >>= PAGE_SHIFT;
806 end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
807 end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
808 pageCount = end - start;
809 break;
810 case RGA2_FORMAT_YCbCr_400:
811 stride = (w + 3) & (~3);
812 size_yrgb = stride * h;
813 size_uv = 0;
814 size_v = 0;
815 start = yrgb_addr >> PAGE_SHIFT;
816 end = yrgb_addr + size_yrgb;
817 end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
818 pageCount = end - start;
819 break;
820 case RGA2_FORMAT_Y4:
821 stride = ((w + 3) & (~3) ) >> 1;
822 size_yrgb = stride * h;
823 size_uv = 0;
824 size_v = 0;
825 start = yrgb_addr >> PAGE_SHIFT;
826 end = yrgb_addr + size_yrgb;
827 end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
828 pageCount = end - start;
829 break;
830 case RGA2_FORMAT_YVYU_422:
831 case RGA2_FORMAT_VYUY_422:
832 case RGA2_FORMAT_YUYV_422:
833 case RGA2_FORMAT_UYVY_422:
834 stride = (w + 3) & (~3);
835 size_yrgb = stride * h;
836 size_uv = stride * h;
837 start = MIN(yrgb_addr, uv_addr);
838 start >>= PAGE_SHIFT;
839 end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
840 end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
841 pageCount = end - start;
842 break;
843 case RGA2_FORMAT_YVYU_420:
844 case RGA2_FORMAT_VYUY_420:
845 case RGA2_FORMAT_YUYV_420:
846 case RGA2_FORMAT_UYVY_420:
847 stride = (w + 3) & (~3);
848 size_yrgb = stride * h;
849 size_uv = (stride * (h >> 1));
850 start = MIN(yrgb_addr, uv_addr);
851 start >>= PAGE_SHIFT;
852 end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
853 end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
854 pageCount = end - start;
855 break;
856 #if 0
857 case RK_FORMAT_BPP1 :
858 break;
859 case RK_FORMAT_BPP2 :
860 break;
861 case RK_FORMAT_BPP4 :
862 break;
863 case RK_FORMAT_BPP8 :
864 break;
865 #endif
866 case RGA2_FORMAT_YCbCr_420_SP_10B:
867 case RGA2_FORMAT_YCrCb_420_SP_10B:
868 stride = (w + 3) & (~3);
869 size_yrgb = stride * h;
870 size_uv = (stride * (h >> 1));
871 start = MIN(yrgb_addr, uv_addr);
872 start >>= PAGE_SHIFT;
873 end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
874 end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
875 pageCount = end - start;
876 break;
877 default :
878 pageCount = 0;
879 start = 0;
880 break;
881 }
882
883 *StartAddr = start;
884 return pageCount;
885 }
886
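/*
 * Pin the user pages starting at page frame 'Memory' with get_user_pages()
 * and fill 'pageTable' with their DMA addresses via rga2_dma_flush_page().
 * If not every page can be pinned, fall back to walking the current
 * process's page tables (pgd/p4d/pud/pmd/pte) for each page.
 */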
887 static int rga2_MapUserMemory(struct page **pages, uint32_t *pageTable,
888 unsigned long Memory, uint32_t pageCount,
889 int writeFlag, int map)
890 {
891 struct vm_area_struct *vma;
892 int32_t result;
893 uint32_t i;
894 uint32_t status;
895 unsigned long Address;
896 unsigned long pfn;
897 struct page __maybe_unused *page;
898 spinlock_t * ptl;
899 pte_t * pte;
900 pgd_t * pgd;
901 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
902 p4d_t * p4d;
903 #endif
904 pud_t * pud;
905 pmd_t * pmd;
906
907 status = 0;
908 Address = 0;
909
910 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
911 mmap_read_lock(current->mm);
912 #else
913 down_read(&current->mm->mmap_sem);
914 #endif
915
916 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 168) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
917 result = get_user_pages(current, current->mm, Memory << PAGE_SHIFT,
918 pageCount, writeFlag ? FOLL_WRITE : 0,
919 pages, NULL);
920 #elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
921 result = get_user_pages(current, current->mm, Memory << PAGE_SHIFT,
922 pageCount, writeFlag, 0, pages, NULL);
923 #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
924 result = get_user_pages_remote(current, current->mm,
925 Memory << PAGE_SHIFT,
926 pageCount, writeFlag, pages, NULL, NULL);
927 #else
928 result = get_user_pages_remote(current->mm, Memory << PAGE_SHIFT,
929 pageCount, writeFlag, pages, NULL, NULL);
930 #endif
931
932 if (result > 0 && result >= pageCount) {
933 /* Fill the page table. */
934 for (i = 0; i < pageCount; i++) {
935 /* Get the physical address from page struct. */
936 pageTable[i] = rga2_dma_flush_page(pages[i], map);
937 }
938
939 for (i = 0; i < result; i++)
940 put_page(pages[i]);
941 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
942 mmap_read_unlock(current->mm);
943 #else
944 up_read(&current->mm->mmap_sem);
945 #endif
946 return 0;
947 }
948 if (result > 0) {
949 for (i = 0; i < result; i++)
950 put_page(pages[i]);
951 }
952 for (i = 0; i < pageCount; i++) {
953 vma = find_vma(current->mm, (Memory + i) << PAGE_SHIFT);
954 if (!vma) {
955 pr_err("RGA2 failed to get vma, result = %d, pageCount = %d\n",
956 result, pageCount);
957 status = RGA2_OUT_OF_RESOURCES;
958 break;
959 }
960 pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);
961 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) {
962 pr_err("RGA2 failed to get pgd, result = %d, pageCount = %d\n",
963 result, pageCount);
964 status = RGA2_OUT_OF_RESOURCES;
965 break;
966 }
967 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
968 /* With a four-level page table, p4d_offset() does nothing and returns pgd. */
969 p4d = p4d_offset(pgd, (Memory + i) << PAGE_SHIFT);
970 if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d))) {
971 pr_err("RGA2 failed to get p4d, result = %d, pageCount = %d\n",
972 result, pageCount);
973 status = RGA2_OUT_OF_RESOURCES;
974 break;
975 }
976
977 pud = pud_offset(p4d, (Memory + i) << PAGE_SHIFT);
978 #else
979 pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);
980 #endif
981 if (pud_none(*pud) || unlikely(pud_bad(*pud))) {
982 pr_err("RGA2 failed to get pud, result = %d, pageCount = %d\n",
983 result, pageCount);
984 status = RGA2_OUT_OF_RESOURCES;
985 break;
986 }
987 pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
988 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
989 pr_err("RGA2 failed to get pmd, result = %d, pageCount = %d\n",
990 result, pageCount);
991 status = RGA2_OUT_OF_RESOURCES;
992 break;
993 }
994 pte = pte_offset_map_lock(current->mm, pmd,
995 (Memory + i) << PAGE_SHIFT,
996 &ptl);
997 if (pte_none(*pte)) {
998 pr_err("RGA2 failed to get pte, result = %d, pageCount = %d\n",
999 result, pageCount);
1000 pte_unmap_unlock(pte, ptl);
1001 status = RGA2_OUT_OF_RESOURCES;
1002 break;
1003 }
1004 pfn = pte_pfn(*pte);
1005 Address = ((pfn << PAGE_SHIFT) |
1006 (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));
1007
1008 pageTable[i] = rga2_dma_flush_page(phys_to_page(Address), map);
1009
1010 pte_unmap_unlock(pte, ptl);
1011 }
1012 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
1013 mmap_read_unlock(current->mm);
1014 #else
1015 up_read(&current->mm->mmap_sem);
1016 #endif
1017 return status;
1018 }
1019
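/*
 * Fill 'Memory' with page-sized DMA addresses taken from the scatterlist
 * of an imported dma-buf/ION buffer, stopping after 'pageCount' entries.
 */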
1020 static int rga2_MapION(struct sg_table *sg,
1021 uint32_t *Memory,
1022 int32_t pageCount)
1023 {
1024 uint32_t i;
1025 uint32_t status;
1026 unsigned long Address;
1027 uint32_t mapped_size = 0;
1028 uint32_t len;
1029 struct scatterlist *sgl = sg->sgl;
1030 uint32_t sg_num = 0;
1031 uint32_t break_flag = 0;
1032
1033 status = 0;
1034 Address = 0;
1035 do {
1036 len = sg_dma_len(sgl) >> PAGE_SHIFT;
1037 /*
1038 * The fd passed by user space is mapped to an sg table through
1039 * dma_buf_map_attachment(), so the dma_address can be used here.
1040 */
1041 Address = sg_dma_address(sgl);
1042
1043 for(i=0; i<len; i++) {
1044 if (mapped_size + i >= pageCount) {
1045 break_flag = 1;
1046 break;
1047 }
1048 Memory[mapped_size + i] = (uint32_t)(Address + (i << PAGE_SHIFT));
1049 }
1050 if (break_flag)
1051 break;
1052 mapped_size += len;
1053 sg_num += 1;
1054 }
1055 while((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));
1056
1057 return 0;
1058 }
1059
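/*
 * Used when reg->MMU_map is set: map only the destination buffer, with
 * clean + invalidate, so its CPU cache is flushed for the job.
 */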
1060 static int rga2_mmu_flush_cache(struct rga2_reg *reg, struct rga2_req *req)
1061 {
1062 int DstMemSize;
1063 unsigned long DstStart, DstPageCount;
1064 uint32_t *MMU_Base, *MMU_Base_phys;
1065 int ret;
1066 int status;
1067 struct page **pages = NULL;
1068 struct rga_dma_buffer_t *dma_buffer = NULL;
1069
1070 MMU_Base = NULL;
1071 DstMemSize = 0;
1072 DstPageCount = 0;
1073 DstStart = 0;
1074
1075 if (reg->MMU_map != true) {
1076 status = -EINVAL;
1077 goto out;
1078 }
1079
1080 /* cal dst buf mmu info */
1081 if (req->mmu_info.dst_mmu_flag & 1) {
1082 DstPageCount = rga2_buf_size_cal(req->dst.yrgb_addr,
1083 req->dst.uv_addr,
1084 req->dst.v_addr,
1085 req->dst.format,
1086 req->dst.vir_w,
1087 req->dst.vir_h,
1088 &DstStart);
1089 if (DstPageCount == 0)
1090 return -EINVAL;
1091 }
1092 /* Calculate the needed memory size */
1093 DstMemSize = (DstPageCount + 15) & (~15);
1094
1095 if (rga2_mmu_buf_get_try(&rga2_mmu_buf, DstMemSize)) {
1096 pr_err("RGA2 Get MMU mem failed\n");
1097 status = RGA2_MALLOC_ERROR;
1098 goto out;
1099 }
1100 pages = rga2_mmu_buf.pages;
1101 mutex_lock(&rga2_service.lock);
1102 MMU_Base = rga2_mmu_buf.buf_virtual +
1103 (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
1104 MMU_Base_phys = rga2_mmu_buf.buf +
1105 (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
1106
1107 mutex_unlock(&rga2_service.lock);
1108 if (DstMemSize) {
1109 dma_buffer = &reg->dma_buffer_dst;
1110 if (dma_buffer->sgt) {
1111 status = -EINVAL;
1112 goto out;
1113 } else {
1114 ret = rga2_MapUserMemory(&pages[0],
1115 MMU_Base,
1116 DstStart, DstPageCount, 1,
1117 MMU_MAP_CLEAN | MMU_MAP_INVALID);
1118 #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
1119 if (RGA2_CHECK_MODE)
1120 rga2_user_memory_check(&pages[0],
1121 req->dst.vir_w,
1122 req->dst.vir_h,
1123 req->dst.format,
1124 2);
1125 #endif
1126 }
1127 if (ret < 0) {
1128 pr_err("rga2 map dst memory failed\n");
1129 status = ret;
1130 goto out;
1131 }
1132 }
1133 rga2_mmu_buf_get(&rga2_mmu_buf, DstMemSize);
1134 reg->MMU_len = DstMemSize;
1135 status = 0;
1136 out:
1137 return status;
1138 }
1139
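/*
 * Build the MMU page tables for bitblt mode: work out the page counts for
 * src0, src1 and dst, reserve room in the MMU ring buffer, map each channel
 * from its dma-buf scatterlist or from user memory, and rewrite the channel
 * addresses in 'req' as page offsets into the new table.
 */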
1140 static int rga2_mmu_info_BitBlt_mode(struct rga2_reg *reg, struct rga2_req *req)
1141 {
1142 int Src0MemSize, DstMemSize, Src1MemSize;
1143 unsigned long Src0Start, Src1Start, DstStart;
1144 unsigned long Src0PageCount, Src1PageCount, DstPageCount;
1145 uint32_t AllSize;
1146 uint32_t *MMU_Base, *MMU_Base_phys;
1147 int ret;
1148 int status;
1149 uint32_t uv_size, v_size;
1150 struct page **pages = NULL;
1151 struct rga_dma_buffer_t *dma_buffer = NULL;
1152
1153 MMU_Base = NULL;
1154 Src0MemSize = 0;
1155 Src1MemSize = 0;
1156 DstMemSize = 0;
1157 Src0PageCount = 0;
1158 Src1PageCount = 0;
1159 DstPageCount = 0;
1160 Src0Start = 0;
1161 Src1Start = 0;
1162 DstStart = 0;
1163
1164 /* cal src0 buf mmu info */
1165 if (req->mmu_info.src0_mmu_flag & 1) {
1166 Src0PageCount = rga2_buf_size_cal(req->src.yrgb_addr,
1167 req->src.uv_addr,
1168 req->src.v_addr,
1169 req->src.format,
1170 req->src.vir_w,
1171 (req->src.vir_h),
1172 &Src0Start);
1173 if (Src0PageCount == 0)
1174 return -EINVAL;
1175 }
1176 /* cal src1 buf mmu info */
1177 if (req->mmu_info.src1_mmu_flag & 1) {
1178 Src1PageCount = rga2_buf_size_cal(req->src1.yrgb_addr,
1179 req->src1.uv_addr,
1180 req->src1.v_addr,
1181 req->src1.format,
1182 req->src1.vir_w,
1183 (req->src1.vir_h),
1184 &Src1Start);
1185 if (Src1PageCount == 0)
1186 return -EINVAL;
1187 }
1188 /* cal dst buf mmu info */
1189 if (req->mmu_info.dst_mmu_flag & 1) {
1190 DstPageCount = rga2_buf_size_cal(req->dst.yrgb_addr,
1191 req->dst.uv_addr,
1192 req->dst.v_addr,
1193 req->dst.format,
1194 req->dst.vir_w,
1195 req->dst.vir_h,
1196 &DstStart);
1197 if (DstPageCount == 0)
1198 return -EINVAL;
1199 }
1200 /* Calculate the needed memory size */
1201 Src0MemSize = (Src0PageCount + 15) & (~15);
1202 Src1MemSize = (Src1PageCount + 15) & (~15);
1203 DstMemSize = (DstPageCount + 15) & (~15);
1204 AllSize = Src0MemSize + Src1MemSize + DstMemSize;
1205
1206 if (rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {
1207 pr_err("RGA2 Get MMU mem failed\n");
1208 status = RGA2_MALLOC_ERROR;
1209 goto out;
1210 }
1211
1212 pages = rga2_mmu_buf.pages;
1213 if(pages == NULL) {
1214 pr_err("RGA MMU malloc pages mem failed\n");
1215 return -EINVAL;
1216 }
1217
1218 mutex_lock(&rga2_service.lock);
1219 MMU_Base = rga2_mmu_buf.buf_virtual + rga2_mmu_buf.front;
1220 MMU_Base_phys = rga2_mmu_buf.buf + rga2_mmu_buf.front;
1221 mutex_unlock(&rga2_service.lock);
1222
1223 if (Src0MemSize) {
1224 dma_buffer = &reg->dma_buffer_src0;
1225
1226 if (dma_buffer->sgt) {
1227 ret = rga2_MapION(dma_buffer->sgt,
1228 &MMU_Base[0], Src0MemSize);
1229 } else {
1230 ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0],
1231 Src0Start, Src0PageCount,
1232 0, MMU_MAP_CLEAN);
1233 #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
1234 if (RGA2_CHECK_MODE)
1235 rga2_user_memory_check(&pages[0],
1236 req->src.vir_w,
1237 req->src.vir_h,
1238 req->src.format,
1239 1);
1240 #endif
1241
1242 /* Save pagetable to unmap. */
1243 reg->MMU_src0_base = MMU_Base;
1244 reg->MMU_src0_count = Src0PageCount;
1245 }
1246
1247 if (ret < 0) {
1248 pr_err("rga2 map src0 memory failed\n");
1249 status = ret;
1250 goto out;
1251 }
1252 /* change the buf address in req struct */
1253 req->mmu_info.src0_base_addr = (((unsigned long)MMU_Base_phys));
1254 uv_size = (req->src.uv_addr
1255 - (Src0Start << PAGE_SHIFT)) >> PAGE_SHIFT;
1256 v_size = (req->src.v_addr
1257 - (Src0Start << PAGE_SHIFT)) >> PAGE_SHIFT;
1258
1259 req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
1260 req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) |
1261 (uv_size << PAGE_SHIFT);
1262 req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) |
1263 (v_size << PAGE_SHIFT);
1264 }
1265
1266 if (Src1MemSize) {
1267 dma_buffer = &reg->dma_buffer_src1;
1268
1269 if (dma_buffer->sgt) {
1270 ret = rga2_MapION(dma_buffer->sgt,
1271 MMU_Base + Src0MemSize, Src1MemSize);
1272 } else {
1273 ret = rga2_MapUserMemory(&pages[0],
1274 MMU_Base + Src0MemSize,
1275 Src1Start, Src1PageCount,
1276 0, MMU_MAP_CLEAN);
1277
1278 /* Save pagetable to unmap. */
1279 reg->MMU_src1_base = MMU_Base + Src0MemSize;
1280 reg->MMU_src1_count = Src1PageCount;
1281 }
1282 if (ret < 0) {
1283 pr_err("rga2 map src1 memory failed\n");
1284 status = ret;
1285 goto out;
1286 }
1287 /* change the buf address in req struct */
1288 req->mmu_info.src1_base_addr = ((unsigned long)(MMU_Base_phys
1289 + Src0MemSize));
1290 req->src1.yrgb_addr = (req->src1.yrgb_addr & (~PAGE_MASK));
1291 }
1292 if (DstMemSize) {
1293 dma_buffer = &reg->dma_buffer_dst;
1294
1295 if (dma_buffer->sgt) {
1296 ret = rga2_MapION(dma_buffer->sgt, MMU_Base + Src0MemSize
1297 + Src1MemSize, DstMemSize);
1298 } else if (req->alpha_mode_0 != 0 && req->bitblt_mode == 0) {
1299 /* The blend mode of src + dst => dst requires clean and invalidate */
1300 ret = rga2_MapUserMemory(&pages[0], MMU_Base
1301 + Src0MemSize + Src1MemSize,
1302 DstStart, DstPageCount, 1,
1303 MMU_MAP_CLEAN | MMU_MAP_INVALID);
1304 #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
1305 if (RGA2_CHECK_MODE)
1306 rga2_user_memory_check(&pages[0],
1307 req->dst.vir_w,
1308 req->dst.vir_h,
1309 req->dst.format,
1310 2);
1311 #endif
1312
1313 /* Save pagetable to invalidate cache and unmap. */
1314 reg->MMU_dst_base = MMU_Base + Src0MemSize + Src1MemSize;
1315 reg->MMU_dst_count = DstPageCount;
1316 } else {
1317 ret = rga2_MapUserMemory(&pages[0], MMU_Base
1318 + Src0MemSize + Src1MemSize,
1319 DstStart, DstPageCount,
1320 1, MMU_MAP_INVALID);
1321 #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
1322 if (RGA2_CHECK_MODE)
1323 rga2_user_memory_check(&pages[0],
1324 req->dst.vir_w,
1325 req->dst.vir_h,
1326 req->dst.format,
1327 2);
1328 #endif
1329
1330 /* Save pagetable to invalidate cache and unmap. */
1331 reg->MMU_dst_base = MMU_Base + Src0MemSize + Src1MemSize;
1332 reg->MMU_dst_count = DstPageCount;
1333 }
1334
1335 if (ret < 0) {
1336 pr_err("rga2 map dst memory failed\n");
1337 status = ret;
1338 goto out;
1339 }
1340 /* change the buf address in req struct */
1341 req->mmu_info.dst_base_addr = ((unsigned long)(MMU_Base_phys
1342 + Src0MemSize + Src1MemSize));
1343 req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));
1344 uv_size = (req->dst.uv_addr
1345 - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1346 v_size = (req->dst.v_addr
1347 - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1348 req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) |
1349 ((uv_size) << PAGE_SHIFT);
1350 req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) |
1351 ((v_size) << PAGE_SHIFT);
1352
1353 if (((req->alpha_rop_flag & 1) == 1) && (req->bitblt_mode == 0)) {
1354 req->mmu_info.src1_base_addr = req->mmu_info.dst_base_addr;
1355 req->mmu_info.src1_mmu_flag = req->mmu_info.dst_mmu_flag;
1356 }
1357 }
1358
1359 /* flush data to DDR */
1360 rga2_dma_flush_range(MMU_Base, (MMU_Base + AllSize));
1361 rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);
1362 reg->MMU_len = AllSize;
1363 status = 0;
1364 out:
1365 return status;
1366 }
1367
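/*
 * Build the MMU page tables for color palette mode.  The source is an
 * index plane whose byte width depends on req->palette_mode (1/2/4/8 bpp)
 * and is mapped through the ELS channel; the destination is mapped as usual.
 */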
1368 static int rga2_mmu_info_color_palette_mode(struct rga2_reg *reg, struct rga2_req *req)
1369 {
1370 int SrcMemSize, DstMemSize;
1371 unsigned long SrcStart, DstStart;
1372 unsigned long SrcPageCount, DstPageCount;
1373 struct page **pages = NULL;
1374 uint32_t uv_size, v_size;
1375 uint32_t AllSize;
1376 uint32_t *MMU_Base = NULL, *MMU_Base_phys;
1377 int ret, status;
1378 uint32_t stride;
1379
1380 uint8_t shift;
1381 uint32_t sw, byte_num;
1382 struct rga_dma_buffer_t *dma_buffer = NULL;
1383
1384 shift = 3 - (req->palette_mode & 3);
1385 sw = req->src.vir_w*req->src.vir_h;
1386 byte_num = sw >> shift;
1387 stride = (byte_num + 3) & (~3);
1388
1389 SrcStart = 0;
1390 DstStart = 0;
1391 SrcPageCount = 0;
1392 DstPageCount = 0;
1393 SrcMemSize = 0;
1394 DstMemSize = 0;
1395
1396 do {
1397 if (req->mmu_info.src0_mmu_flag) {
1398 if (req->mmu_info.els_mmu_flag & 1) {
1399 req->mmu_info.src0_mmu_flag = 0;
1400 req->mmu_info.src1_mmu_flag = 0;
1401 } else {
1402 req->mmu_info.els_mmu_flag = req->mmu_info.src0_mmu_flag;
1403 req->mmu_info.src0_mmu_flag = 0;
1404 }
1405
1406 SrcPageCount = rga2_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);
1407 if(SrcPageCount == 0) {
1408 return -EINVAL;
1409 }
1410 }
1411
1412 if (req->mmu_info.dst_mmu_flag) {
1413 DstPageCount = rga2_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
1414 req->dst.format, req->dst.vir_w, req->dst.vir_h,
1415 &DstStart);
1416 if(DstPageCount == 0) {
1417 return -EINVAL;
1418 }
1419 }
1420
1421 SrcMemSize = (SrcPageCount + 15) & (~15);
1422 DstMemSize = (DstPageCount + 15) & (~15);
1423
1424 AllSize = SrcMemSize + DstMemSize;
1425
1426 if (rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {
1427 pr_err("RGA2 Get MMU mem failed\n");
1428 status = RGA2_MALLOC_ERROR;
1429 break;
1430 }
1431
1432 pages = rga2_mmu_buf.pages;
1433 if(pages == NULL) {
1434 pr_err("RGA MMU malloc pages mem failed\n");
1435 return -EINVAL;
1436 }
1437
1438 mutex_lock(&rga2_service.lock);
1439 MMU_Base = rga2_mmu_buf.buf_virtual + rga2_mmu_buf.front;
1440 MMU_Base_phys = rga2_mmu_buf.buf + rga2_mmu_buf.front;
1441 mutex_unlock(&rga2_service.lock);
1442
1443 if(SrcMemSize) {
1444 dma_buffer = &reg->dma_buffer_src0;
1445
1446 if (dma_buffer->sgt) {
1447 ret = rga2_MapION(dma_buffer->sgt,
1448 &MMU_Base[0], SrcMemSize);
1449 } else {
1450 ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0],
1451 SrcStart, SrcPageCount, 0, MMU_MAP_CLEAN);
1452 #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
1453 if (RGA2_CHECK_MODE)
1454 rga2_user_memory_check(&pages[0], req->src.vir_w,
1455 req->src.vir_h, req->src.format,
1456 1);
1457 #endif
1458 }
1459 if (ret < 0) {
1460 pr_err("rga2 map src0 memory failed\n");
1461 status = ret;
1462 break;
1463 }
1464
1465 /* change the buf address in req struct */
1466 req->mmu_info.els_base_addr = (((unsigned long)MMU_Base_phys));
1467 /*
1468 * The color palette mode will not have a YUV format as input,
1469 * so the UV component address is not needed.
1470 */
1471 req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
1472 }
1473
1474 if(DstMemSize) {
1475 dma_buffer = &reg->dma_buffer_dst;
1476
1477 if (dma_buffer->sgt) {
1478 ret = rga2_MapION(dma_buffer->sgt,
1479 MMU_Base + SrcMemSize, DstMemSize);
1480 } else {
1481 ret = rga2_MapUserMemory(&pages[0], MMU_Base + SrcMemSize,
1482 DstStart, DstPageCount, 1, MMU_MAP_INVALID);
1483 #ifdef CONFIG_ROCKCHIP_RGA2_DEBUGGER
1484 if (RGA2_CHECK_MODE)
1485 rga2_user_memory_check(&pages[0], req->dst.vir_w,
1486 req->dst.vir_h, req->dst.format,
1487 1);
1488 #endif
1489 }
1490 if (ret < 0) {
1491 pr_err("rga2 map dst memory failed\n");
1492 status = ret;
1493 break;
1494 }
1495 /* change the buf address in req struct */
1496 req->mmu_info.dst_base_addr = ((unsigned long)(MMU_Base_phys + SrcMemSize));
1497 req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));
1498
1499 uv_size = (req->dst.uv_addr
1500 - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1501 v_size = (req->dst.v_addr
1502 - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1503 req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) |
1504 ((uv_size) << PAGE_SHIFT);
1505 req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) |
1506 ((v_size) << PAGE_SHIFT);
1507 }
1508
1509 /* flush data to DDR */
1510 rga2_dma_flush_range(MMU_Base, (MMU_Base + AllSize));
1511 rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);
1512 reg->MMU_len = AllSize;
1513
1514 return 0;
1515 }
1516 while(0);
1517
1518 return status;
1519 }
1520
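/*
 * Build the MMU page table for color fill mode: only the destination
 * channel needs to be mapped.
 */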
1521 static int rga2_mmu_info_color_fill_mode(struct rga2_reg *reg, struct rga2_req *req)
1522 {
1523 int DstMemSize;
1524 unsigned long DstStart;
1525 unsigned long DstPageCount;
1526 struct page **pages = NULL;
1527 uint32_t uv_size, v_size;
1528 uint32_t AllSize;
1529 uint32_t *MMU_Base, *MMU_Base_phys;
1530 int ret;
1531 int status;
1532 struct rga_dma_buffer_t *dma_buffer = NULL;
1533
1534 DstMemSize = 0;
1535 DstPageCount = 0;
1536 DstStart = 0;
1537 MMU_Base = NULL;
1538
1539 do {
1540 if(req->mmu_info.dst_mmu_flag & 1) {
1541 DstPageCount = rga2_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
1542 req->dst.format, req->dst.vir_w, req->dst.vir_h,
1543 &DstStart);
1544 if(DstPageCount == 0) {
1545 return -EINVAL;
1546 }
1547 }
1548
1549 DstMemSize = (DstPageCount + 15) & (~15);
1550 AllSize = DstMemSize;
1551
1552 if(rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {
1553 pr_err("RGA2 Get MMU mem failed\n");
1554 status = RGA2_MALLOC_ERROR;
1555 break;
1556 }
1557
1558 pages = rga2_mmu_buf.pages;
1559 if(pages == NULL) {
1560 pr_err("RGA MMU malloc pages mem failed\n");
1561 return -EINVAL;
1562 }
1563
1564 mutex_lock(&rga2_service.lock);
1565 MMU_Base_phys = rga2_mmu_buf.buf + rga2_mmu_buf.front;
1566 MMU_Base = rga2_mmu_buf.buf_virtual + rga2_mmu_buf.front;
1567 mutex_unlock(&rga2_service.lock);
1568
1569 if (DstMemSize) {
1570 dma_buffer = &reg->dma_buffer_dst;
1571
1572 if (dma_buffer->sgt) {
1573 ret = rga2_MapION(dma_buffer->sgt, &MMU_Base[0], DstMemSize);
1574 }
1575 else {
1576 ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0],
1577 DstStart, DstPageCount,
1578 1, MMU_MAP_INVALID);
1579 }
1580 if (ret < 0) {
1581 pr_err("rga2 map dst memory failed\n");
1582 status = ret;
1583 break;
1584 }
1585
1586 /* change the buf address in req struct */
1587 req->mmu_info.dst_base_addr = ((unsigned long)MMU_Base_phys);
1588 req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));
1589
1590 uv_size = (req->dst.uv_addr
1591 - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1592 v_size = (req->dst.v_addr
1593 - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1594 req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) |
1595 ((uv_size) << PAGE_SHIFT);
1596 req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) |
1597 ((v_size) << PAGE_SHIFT);
1598 }
1599
1600 /* flush data to DDR */
1601 rga2_dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
1602 rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);
1603 reg->MMU_len = AllSize;
1604
1605 return 0;
1606 }
1607 while(0);
1608
1609 return status;
1610 }
1611
1612
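/*
 * Build the MMU page table for updating the palette (LUT) table: the LUT
 * is fetched through the ELS channel, so the other channels' mmu flags are
 * cleared and only the pattern buffer is mapped.
 */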
1613 static int rga2_mmu_info_update_palette_table_mode(struct rga2_reg *reg, struct rga2_req *req)
1614 {
1615 int LutMemSize;
1616 unsigned long LutStart;
1617 unsigned long LutPageCount;
1618 struct page **pages = NULL;
1619 uint32_t uv_size, v_size;
1620 uint32_t AllSize;
1621 uint32_t *MMU_Base, *MMU_Base_phys;
1622 int ret, status;
1623 struct rga_dma_buffer_t *dma_buffer = NULL;
1624
1625 MMU_Base = NULL;
1626 LutPageCount = 0;
1627 LutMemSize = 0;
1628 LutStart = 0;
1629
1630 do {
1631 /* cal lut buf mmu info */
1632 if (req->mmu_info.els_mmu_flag & 1) {
1633 req->mmu_info.src0_mmu_flag = req->mmu_info.src0_mmu_flag == 1 ? 0 : req->mmu_info.src0_mmu_flag;
1634 req->mmu_info.src1_mmu_flag = req->mmu_info.src1_mmu_flag == 1 ? 0 : req->mmu_info.src1_mmu_flag;
1635 req->mmu_info.dst_mmu_flag = req->mmu_info.dst_mmu_flag == 1 ? 0 : req->mmu_info.dst_mmu_flag;
1636
1637 LutPageCount = rga2_buf_size_cal(req->pat.yrgb_addr, req->pat.uv_addr, req->pat.v_addr,
1638 req->pat.format, req->pat.vir_w, req->pat.vir_h,
1639 &LutStart);
1640 if(LutPageCount == 0) {
1641 return -EINVAL;
1642 }
1643 }
1644
1645 LutMemSize = (LutPageCount + 15) & (~15);
1646 AllSize = LutMemSize;
1647
1648 if (rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {
1649 pr_err("RGA2 Get MMU mem failed\n");
1650 status = RGA2_MALLOC_ERROR;
1651 break;
1652 }
1653
1654 pages = rga2_mmu_buf.pages;
1655 if (pages == NULL) {
1656 pr_err("RGA MMU malloc pages mem failed\n");
1657 return -EINVAL;
1658 }
1659
1660 mutex_lock(&rga2_service.lock);
1661 MMU_Base = rga2_mmu_buf.buf_virtual + rga2_mmu_buf.front;
1662 MMU_Base_phys = rga2_mmu_buf.buf + rga2_mmu_buf.front;
1663 mutex_unlock(&rga2_service.lock);
1664
1665 if (LutMemSize) {
1666 dma_buffer = &reg->dma_buffer_els;
1667
1668 if (dma_buffer->sgt) {
1669 ret = rga2_MapION(dma_buffer->sgt,
1670 &MMU_Base[0], LutMemSize);
1671 } else {
1672 ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0],
1673 LutStart, LutPageCount, 0, MMU_MAP_CLEAN);
1674 }
1675 if (ret < 0) {
1676 pr_err("rga2 map palette memory failed\n");
1677 status = ret;
1678 break;
1679 }
1680
1681 /* change the buf address in req struct */
1682 req->mmu_info.els_base_addr = (((unsigned long)MMU_Base_phys));
1683
1684 req->pat.yrgb_addr = (req->pat.yrgb_addr & (~PAGE_MASK));
1685
1686 uv_size = (req->pat.uv_addr
1687 - (LutStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1688 v_size = (req->pat.v_addr
1689 - (LutStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1690 req->pat.uv_addr = (req->pat.uv_addr & (~PAGE_MASK)) |
1691 ((uv_size) << PAGE_SHIFT);
1692 req->pat.v_addr = (req->pat.v_addr & (~PAGE_MASK)) |
1693 ((v_size) << PAGE_SHIFT);
1694 }
1695
1696 /* flush data to DDR */
1697 rga2_dma_flush_range(MMU_Base, (MMU_Base + AllSize));
1698 rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);
1699 reg->MMU_len = AllSize;
1700
1701 return 0;
1702 }
1703 while(0);
1704
1705 return status;
1706 }
1707
1708 /*
1709 * yqw:
1710 * It is currently unclear whether rga2 still uses this function.
1711 * Since cache invalidate/clean occupies the parameter reg->MMU_base,
1712 * this function is disabled for now and should be re-implemented
1713 * if it turns out to be needed.
1714 */
1715 #if 0
1716 static int rga2_mmu_info_update_patten_buff_mode(struct rga2_reg *reg, struct rga2_req *req)
1717 {
1718 int SrcMemSize, CMDMemSize;
1719 unsigned long SrcStart, CMDStart;
1720 struct page **pages = NULL;
1721 uint32_t i;
1722 uint32_t AllSize;
1723 uint32_t *MMU_Base, *MMU_p;
1724 int ret, status;
1725
1726 MMU_Base = MMU_p = 0;
1727
1728 do {
1729 /* cal src buf mmu info */
1730 SrcMemSize = rga2_mem_size_cal(req->pat.yrgb_addr, req->pat.act_w * req->pat.act_h * 4, &SrcStart);
1731 if(SrcMemSize == 0) {
1732 return -EINVAL;
1733 }
1734
1735 /* cal cmd buf mmu info */
1736 CMDMemSize = rga2_mem_size_cal((unsigned long)rga2_service.cmd_buff, RGA2_CMD_BUF_SIZE, &CMDStart);
1737 if(CMDMemSize == 0) {
1738 return -EINVAL;
1739 }
1740
1741 AllSize = SrcMemSize + CMDMemSize;
1742
1743 pages = rga2_mmu_buf.pages;
1744
1745 MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
1746 if (MMU_Base == NULL)
1747 return -EINVAL;
1748
1749 for(i=0; i<CMDMemSize; i++) {
1750 MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
1751 }
1752
1753 if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
1754 {
1755 ret = rga2_MapUserMemory(&pages[CMDMemSize],
1756 &MMU_Base[CMDMemSize],
1757 SrcStart, SrcMemSize,
1758 1, MMU_MAP_CLEAN);
1759 if (ret < 0) {
1760 pr_err("rga map src memory failed\n");
1761 status = ret;
1762 break;
1763 }
1764 }
1765 else
1766 {
1767 MMU_p = MMU_Base + CMDMemSize;
1768
1769 for(i=0; i<SrcMemSize; i++)
1770 {
1771 MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
1772 }
1773 }
1774
1775 /* zsq
1776 * Change the buf address in the req struct so that the hardware
1777 * sees offsets into the MMU page table instead of the real address.
1778 */
1779 req->mmu_info.src0_base_addr = (virt_to_phys(MMU_Base) >> 2);
1780
1781 req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
1782
1783 /*record the malloc buf for the cmd end to release*/
1784 reg->MMU_base = MMU_Base;
1785
1786 /* flush data to DDR */
1787 rga2_dma_flush_range(MMU_Base, (MMU_Base + AllSize));
1788 return 0;
1789
1790 }
1791 while(0);
1792
1793 return status;
1794 }
1795 #endif
1796
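/*
 * Entry point for MMU setup: flush the destination cache when reg->MMU_map
 * is set, otherwise dispatch to the page-table setup routine that matches
 * req->render_mode.
 */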
1797 int rga2_set_mmu_info(struct rga2_reg *reg, struct rga2_req *req)
1798 {
1799 int ret;
1800
1801 if (reg->MMU_map == true) {
1802 ret = rga2_mmu_flush_cache(reg, req);
1803 return ret;
1804 }
1805
1806 switch (req->render_mode) {
1807 case bitblt_mode :
1808 ret = rga2_mmu_info_BitBlt_mode(reg, req);
1809 break;
1810 case color_palette_mode :
1811 ret = rga2_mmu_info_color_palette_mode(reg, req);
1812 break;
1813 case color_fill_mode :
1814 ret = rga2_mmu_info_color_fill_mode(reg, req);
1815 break;
1816 case update_palette_table_mode :
1817 ret = rga2_mmu_info_update_palette_table_mode(reg, req);
1818 break;
1819 #if 0
1820 case update_patten_buff_mode :
1821 ret = rga2_mmu_info_update_patten_buff_mode(reg, req);
1822 break;
1823 #endif
1824 default :
1825 ret = -1;
1826 break;
1827 }
1828
1829 return ret;
1830 }
1831
1832