xref: /OK3568_Linux_fs/kernel/drivers/video/rockchip/rga/rga_mmu_info.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 
3 
4 #include <linux/version.h>
5 #include <linux/init.h>
6 #include <linux/module.h>
7 #include <linux/fs.h>
8 #include <linux/sched.h>
9 #include <linux/signal.h>
10 #include <linux/pagemap.h>
11 #include <linux/seq_file.h>
12 #include <linux/mm.h>
13 #include <linux/mman.h>
14 #include <linux/sched.h>
15 #include <linux/slab.h>
16 #include <linux/memory.h>
17 #include <linux/dma-mapping.h>
18 #include <asm/memory.h>
19 #include <asm/atomic.h>
20 #include <asm/cacheflush.h>
21 #include "rga_mmu_info.h"
22 #include <linux/delay.h>
23 
24 extern rga_service_info rga_service;
25 extern struct rga_mmu_buf_t rga_mmu_buf;
26 
27 #if RGA_DEBUGFS
28 extern int RGA_CHECK_MODE;
29 #endif
30 
31 #define KERNEL_SPACE_VALID    0xc0000000
32 
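/*
 * Flush a CPU-written range (here: the freshly filled MMU page table) out to
 * memory with dma_sync_single_for_device() so the RGA hardware reads
 * up-to-date entries.
 */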
33 void rga_dma_flush_range(void *pstart, void *pend)
34 {
35 	dma_sync_single_for_device(rga_drvdata->dev, virt_to_phys(pstart), pend - pstart, DMA_TO_DEVICE);
36 }
37 
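/*
 * Commit a reservation in the shared MMU page-table ring buffer by advancing
 * the front index; rga_mmu_buf_get_try() below performs the availability
 * check first.
 */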
38 static int rga_mmu_buf_get(struct rga_mmu_buf_t *t, uint32_t size)
39 {
40     mutex_lock(&rga_service.lock);
41     t->front += size;
42     mutex_unlock(&rga_service.lock);
43 
44     return 0;
45 }
46 
47 static void rga_current_mm_read_lock(struct mm_struct *mm)
48 {
49 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
50 	mmap_read_lock(mm);
51 #else
52 	down_read(&mm->mmap_sem);
53 #endif
54 }
55 
56 static void rga_current_mm_read_unlock(struct mm_struct *mm)
57 {
58 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
59 	mmap_read_unlock(mm);
60 #else
61 	up_read(&mm->mmap_sem);
62 #endif
63 }
64 
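/*
 * Version-agnostic wrapper around get_user_pages()/get_user_pages_remote(),
 * hiding the signature changes between kernel releases. "Memory" is a page
 * frame number, so it is shifted back into a user virtual address here.
 */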
65 static long rga_get_user_pages(struct page **pages, unsigned long Memory,
66 			       uint32_t pageCount, int writeFlag,
67 			       struct mm_struct *current_mm)
68 {
69 	#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 168) && \
70 		LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
71 		return get_user_pages(current, current_mm, Memory << PAGE_SHIFT,
72 				      pageCount, writeFlag ? FOLL_WRITE : 0, pages, NULL);
73 	#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
74 		return get_user_pages(current, current_mm, Memory << PAGE_SHIFT,
75 				      pageCount, writeFlag ? FOLL_WRITE : 0, 0, pages, NULL);
76 	#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
77 		return get_user_pages_remote(current, current_mm, Memory << PAGE_SHIFT,
78 					     pageCount, writeFlag ? FOLL_WRITE : 0, pages,
79 					     NULL, NULL);
80 	#else
81 		return get_user_pages_remote(current_mm, Memory << PAGE_SHIFT,
82 					     pageCount, writeFlag ? FOLL_WRITE : 0, pages,
83 					     NULL, NULL);
84 	#endif
85 }
86 
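/*
 * Try to reserve "size" page-table entries in the ring buffer without
 * committing: fail with -ENOMEM if the space between front and back cannot
 * hold the request, and wrap front back to 0 when the reservation would run
 * past the end of the buffer.
 */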
87 static int rga_mmu_buf_get_try(struct rga_mmu_buf_t *t, uint32_t size)
88 {
89 	int ret = 0;
90 
91 	mutex_lock(&rga_service.lock);
92 	if ((t->back - t->front) > t->size) {
93 		if(t->front + size > t->back - t->size) {
94 			ret = -ENOMEM;
95 			goto out;
96 		}
97 	} else {
98 		if ((t->front + size) > t->back) {
99 			ret = -ENOMEM;
100 			goto out;
101 		}
102 		if (t->front + size > t->size) {
103 			if (size > (t->back - t->size)) {
104 				ret = -ENOMEM;
105 				goto out;
106 			}
107 			t->front = 0;
108 		}
109 	}
110 
111 out:
112 	mutex_unlock(&rga_service.lock);
113 	return ret;
114 }
115 
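/*
 * Convert a virtual address range into its starting page frame number
 * (*StartAddr) and the number of pages it spans.
 */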
116 static int rga_mem_size_cal(unsigned long Mem, uint32_t MemSize, unsigned long *StartAddr)
117 {
118     unsigned long start, end;
119     uint32_t pageCount;
120 
121     end = (Mem + (MemSize + PAGE_SIZE - 1)) >> PAGE_SHIFT;
122     start = Mem >> PAGE_SHIFT;
123     pageCount = end - start;
124     *StartAddr = start;
125     return pageCount;
126 }
127 
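/*
 * Work out how many pages an image buffer occupies for the given format,
 * virtual width and height. For semi-planar/planar YUV the Y, U and V planes
 * may sit at unrelated addresses, so the page range runs from the lowest
 * plane start to the highest plane end.
 */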
128 static int rga_buf_size_cal(unsigned long yrgb_addr, unsigned long uv_addr, unsigned long v_addr,
129                                         int format, uint32_t w, uint32_t h, unsigned long *StartAddr )
130 {
131     uint32_t size_yrgb = 0;
132     uint32_t size_uv = 0;
133     uint32_t size_v = 0;
134     uint32_t stride = 0;
135     unsigned long start, end;
136     uint32_t pageCount;
137 
138     switch(format)
139     {
140         case RK_FORMAT_RGBA_8888 :
141             stride = (w * 4 + 3) & (~3);
142             size_yrgb = stride*h;
143             start = yrgb_addr >> PAGE_SHIFT;
144             pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
145             break;
146         case RK_FORMAT_RGBX_8888 :
147             stride = (w * 4 + 3) & (~3);
148             size_yrgb = stride*h;
149             start = yrgb_addr >> PAGE_SHIFT;
150             pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
151             break;
152         case RK_FORMAT_RGB_888 :
153             stride = (w * 3 + 3) & (~3);
154             size_yrgb = stride*h;
155             start = yrgb_addr >> PAGE_SHIFT;
156             pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
157             break;
158         case RK_FORMAT_BGRA_8888 :
159             size_yrgb = w*h*4;
160             start = yrgb_addr >> PAGE_SHIFT;
161             pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
162             break;
163         case RK_FORMAT_RGB_565 :
164             stride = (w*2 + 3) & (~3);
165             size_yrgb = stride * h;
166             start = yrgb_addr >> PAGE_SHIFT;
167             pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
168             break;
169         case RK_FORMAT_RGBA_5551 :
170             stride = (w*2 + 3) & (~3);
171             size_yrgb = stride * h;
172             start = yrgb_addr >> PAGE_SHIFT;
173             pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
174             break;
175         case RK_FORMAT_RGBA_4444 :
176             stride = (w*2 + 3) & (~3);
177             size_yrgb = stride * h;
178             start = yrgb_addr >> PAGE_SHIFT;
179             pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
180             break;
181         case RK_FORMAT_BGR_888 :
182             stride = (w*3 + 3) & (~3);
183             size_yrgb = stride * h;
184             start = yrgb_addr >> PAGE_SHIFT;
185             pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
186             break;
187 
188         /* YUV FORMAT */
189         case RK_FORMAT_YCbCr_422_SP :
190             stride = (w + 3) & (~3);
191             size_yrgb = stride * h;
192             size_uv = stride * h;
193             start = MIN(yrgb_addr, uv_addr);
194 
195             start >>= PAGE_SHIFT;
196             end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
197             end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
198             pageCount = end - start;
199             break;
200         case RK_FORMAT_YCbCr_422_P :
201             stride = (w + 3) & (~3);
202             size_yrgb = stride * h;
203             size_uv = ((stride >> 1) * h);
204             size_v = ((stride >> 1) * h);
205             start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
206             start = start >> PAGE_SHIFT;
207             end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
208             end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
209             pageCount = end - start;
210             break;
211         case RK_FORMAT_YCbCr_420_SP :
212             stride = (w + 3) & (~3);
213             size_yrgb = stride * h;
214             size_uv = (stride * (h >> 1));
215             start = MIN(yrgb_addr, uv_addr);
216             start >>= PAGE_SHIFT;
217             end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
218             end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
219             pageCount = end - start;
220             break;
221         case RK_FORMAT_YCbCr_420_P :
222             stride = (w + 3) & (~3);
223             size_yrgb = stride * h;
224             size_uv = ((stride >> 1) * (h >> 1));
225             size_v = ((stride >> 1) * (h >> 1));
226             start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
227             start >>= PAGE_SHIFT;
228             end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
229             end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
230             pageCount = end - start;
231             break;
232 
233         case RK_FORMAT_YCrCb_422_SP :
234             stride = (w + 3) & (~3);
235             size_yrgb = stride * h;
236             size_uv = stride * h;
237             start = MIN(yrgb_addr, uv_addr);
238             start >>= PAGE_SHIFT;
239             end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
240             end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
241             pageCount = end - start;
242             break;
243         case RK_FORMAT_YCrCb_422_P :
244             stride = (w + 3) & (~3);
245             size_yrgb = stride * h;
246             size_uv = ((stride >> 1) * h);
247             size_v = ((stride >> 1) * h);
248             start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
249             start >>= PAGE_SHIFT;
250             end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
251             end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
252             pageCount = end - start;
253             break;
254 
255         case RK_FORMAT_YCrCb_420_SP :
256             stride = (w + 3) & (~3);
257             size_yrgb = stride * h;
258             size_uv = (stride * (h >> 1));
259             start = MIN(yrgb_addr, uv_addr);
260             start >>= PAGE_SHIFT;
261             end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
262             end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
263             pageCount = end - start;
264             break;
265         case RK_FORMAT_YCrCb_420_P :
266             stride = (w + 3) & (~3);
267             size_yrgb = stride * h;
268             size_uv = ((stride >> 1) * (h >> 1));
269             size_v = ((stride >> 1) * (h >> 1));
270             start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
271             start >>= PAGE_SHIFT;
272             end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
273             end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
274             pageCount = end - start;
275             break;
276         #if 0
277         case RK_FORMAT_BPP1 :
278             break;
279         case RK_FORMAT_BPP2 :
280             break;
281         case RK_FORMAT_BPP4 :
282             break;
283         case RK_FORMAT_BPP8 :
284             break;
285         #endif
286         default :
287             pageCount = 0;
288             start = 0;
289             break;
290     }
291 
292     *StartAddr = start;
293     return pageCount;
294 }
295 
296 #if RGA_DEBUGFS
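/*
 * Debugfs-only sanity check: kmap() the page holding the last word of the
 * user buffer and print that word, to verify the whole buffer is really
 * mapped and readable. "flag" selects the src/dst log prefix.
 */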
297 static int rga_usermemory_cheeck(struct page **pages, u32 w, u32 h, u32 format, int flag)
298 {
299 	int bits;
300 	void *vaddr = NULL;
301 	int taipage_num;
302 	int taidata_num;
303 	int *tai_vaddr = NULL;
304 
305 	switch (format) {
306 	case RK_FORMAT_RGBA_8888:
307 	case RK_FORMAT_RGBX_8888:
308 	case RK_FORMAT_BGRA_8888:
309 		bits = 32;
310 		break;
311 	case RK_FORMAT_RGB_888:
312 	case RK_FORMAT_BGR_888:
313 		bits = 24;
314 		break;
315 	case RK_FORMAT_RGB_565:
316 	case RK_FORMAT_RGBA_5551:
317 	case RK_FORMAT_RGBA_4444:
318 	case RK_FORMAT_YCbCr_422_SP:
319 	case RK_FORMAT_YCbCr_422_P:
320 	case RK_FORMAT_YCrCb_422_SP:
321 	case RK_FORMAT_YCrCb_422_P:
322 		bits = 16;
323 		break;
324 	case RK_FORMAT_YCbCr_420_SP:
325 	case RK_FORMAT_YCbCr_420_P:
326 	case RK_FORMAT_YCrCb_420_SP:
327 	case RK_FORMAT_YCrCb_420_P:
328 		bits = 12;
329 		break;
330 	case RK_FORMAT_YCbCr_420_SP_10B:
331 	case RK_FORMAT_YCrCb_420_SP_10B:
332 		bits = 15;
333 		break;
334 	default:
335 		printk(KERN_DEBUG "unknown format\n");
336 		return -1;
337 	}
338 	taipage_num = w * h * bits / 8 / (1024 * 4);
339 	taidata_num = w * h * bits / 8 % (1024 * 4);
340 	if (taidata_num == 0) {
341 		vaddr = kmap(pages[taipage_num - 1]);
342 		tai_vaddr = (int *)vaddr + 1023;
343 	} else {
344 		vaddr = kmap(pages[taipage_num]);
345 		tai_vaddr = (int *)vaddr + taidata_num / 4 - 1;
346 	}
347 	if (flag == 1) {
348 		printk(KERN_DEBUG "src user memory check\n");
349 		printk(KERN_DEBUG "tail data is %d\n", *tai_vaddr);
350 	} else {
351 		printk(KERN_DEBUG "dst user memory check\n");
352 		printk(KERN_DEBUG "tail data is %d\n", *tai_vaddr);
353 	}
354 	if (taidata_num == 0)
355 		kunmap(pages[taipage_num - 1]);
356 	else
357 		kunmap(pages[taipage_num]);
358 	return 0;
359 }
360 #endif
361 
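/*
 * Pin the user pages behind "Memory" (a page frame number) and write their
 * physical addresses into the RGA page table. If get_user_pages() cannot pin
 * every page (for example VM_PFNMAP mappings), fall back to walking the
 * process page tables by hand to resolve each PFN.
 */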
362 static int rga_MapUserMemory(struct page **pages,
363                                             uint32_t *pageTable,
364                                             unsigned long Memory,
365                                             uint32_t pageCount)
366 {
367     int32_t result;
368     uint32_t i;
369     uint32_t status;
370     unsigned long Address;
371 
372     status = 0;
373     Address = 0;
374 
375     do {
376         rga_current_mm_read_lock(current->mm);
377 
378 	result = rga_get_user_pages(pages, Memory, pageCount, 1, current->mm);
379 
380         rga_current_mm_read_unlock(current->mm);
381 
382         #if 0
383         if(result <= 0 || result < pageCount)
384         {
385             status = 0;
386 
387             for(i=0; i<pageCount; i++)
388             {
389                 temp = armv7_va_to_pa((Memory + i) << PAGE_SHIFT);
390                 if (temp == 0xffffffff)
391                 {
392                     printk("rga find mmu phy ddr error\n ");
393                     status = RGA_OUT_OF_RESOURCES;
394                     break;
395                 }
396 
397                 pageTable[i] = temp;
398             }
399 
400             return status;
401         }
402         #else
403         if(result <= 0 || result < pageCount)
404         {
405             struct vm_area_struct *vma;
406 
407             if (result>0) {
408 		rga_current_mm_read_lock(current->mm);
409 
410 		for (i = 0; i < result; i++)
411 			put_page(pages[i]);
412 
413 		rga_current_mm_read_unlock(current->mm);
414 	    }
415 
416             for(i=0; i<pageCount; i++)
417             {
418                 vma = find_vma(current->mm, (Memory + i) << PAGE_SHIFT);
419 
420                 if (vma)//&& (vma->vm_flags & VM_PFNMAP) )
421                 {
422                     do
423                     {
424                         pte_t       * pte;
425                         spinlock_t  * ptl;
426                         unsigned long pfn;
427                         pgd_t * pgd;
428 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
429 						p4d_t * p4d;
430 #endif
431                         pud_t * pud;
432 
433                         pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);
434 
435                         if(pgd_val(*pgd) == 0)
436                         {
437                             //printk("rga pgd value is zero \n");
438                             break;
439                         }
440 
441 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
442 						/* With four-level page tables the p4d level is folded, so p4d_offset() simply returns pgd. */
443 						p4d = p4d_offset(pgd, (Memory + i) << PAGE_SHIFT);
444 						if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d))) {
445 							pr_err("RGA2 failed to get p4d, result = %d, pageCount = %d\n",
446 								   result, pageCount);
447 							status = RGA_OUT_OF_RESOURCES;
448 							break;
449 						}
450 
451 						pud = pud_offset(p4d, (Memory + i) << PAGE_SHIFT);
452 #else
453 						pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);
454 #endif
455                         if (pud)
456                         {
457                             pmd_t * pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
458                             if (pmd)
459                             {
460                                 pte = pte_offset_map_lock(current->mm, pmd, (Memory + i) << PAGE_SHIFT, &ptl);
461                                 if (!pte)
462                                 {
463                                     pte_unmap_unlock(pte, ptl);
464                                     break;
465                                 }
466                             }
467                             else
468                             {
469                                 break;
470                             }
471                         }
472                         else
473                         {
474                             break;
475                         }
476 
477                         pfn = pte_pfn(*pte);
478                         Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));
479                         pte_unmap_unlock(pte, ptl);
480                     }
481                     while (0);
482 
483                     pageTable[i] = Address;
484                 }
485                 else
486                 {
487                     status = RGA_OUT_OF_RESOURCES;
488                     break;
489                 }
490             }
491 
492             return status;
493         }
494         #endif
495 
496         /* Fill the page table. */
497         for(i=0; i<pageCount; i++)
498         {
499             /* Get the physical address from page struct. */
500             pageTable[i] = page_to_phys(pages[i]);
501         }
502 
503 	rga_current_mm_read_lock(current->mm);
504 
505 	for (i = 0; i < result; i++)
506 		put_page(pages[i]);
507 
508 	rga_current_mm_read_unlock(current->mm);
509 
510         return 0;
511     }
512     while(0);
513 
514     return status;
515 }
516 
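/*
 * Fill the RGA page table from an ION/dma-buf scatterlist: walk the sg
 * entries, expand each segment into per-page physical addresses, and honour
 * an optional page offset into the buffer.
 */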
517 static int rga_MapION(struct sg_table *sg,
518                                uint32_t *Memory,
519                                int32_t  pageCount,
520                                uint32_t offset)
521 {
522     uint32_t i;
523     uint32_t status;
524     unsigned long Address;
525     uint32_t mapped_size = 0;
526     uint32_t len = 0;
527     struct scatterlist *sgl = sg->sgl;
528     uint32_t sg_num = 0;
529 
530     status = 0;
531     Address = 0;
532     offset = offset >> PAGE_SHIFT;
533     if (offset != 0) {
534         do {
535             len += (sg_dma_len(sgl) >> PAGE_SHIFT);
536 	        if (len == offset) {
537 	    	    sg_num += 1;
538 		    break;
539     	    }
540     	    else {
541                 if (len > offset)
542                      break;
543     	    }
544                 sg_num += 1;
545         }
546         while((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));
547 
548         sgl = sg->sgl;
549     	len = 0;
550         do {
551             len += (sg_dma_len(sgl) >> PAGE_SHIFT);
552             sgl = sg_next(sgl);
553         }
554         while(--sg_num);
555 
556         offset -= len;
557 
558         len = sg_dma_len(sgl) >> PAGE_SHIFT;
559         Address = sg_phys(sgl);
560     	Address += offset;
561 
562         for(i=offset; i<len; i++) {
563              Memory[i - offset] = Address + (i << PAGE_SHIFT);
564         }
565         mapped_size += (len - offset);
566         sg_num = 1;
567         sgl = sg_next(sgl);
568         do {
569             len = sg_dma_len(sgl) >> PAGE_SHIFT;
570             Address = sg_phys(sgl);
571 
572             for(i=0; i<len; i++) {
573                 Memory[mapped_size + i] = Address + (i << PAGE_SHIFT);
574             }
575 
576             mapped_size += len;
577             sg_num += 1;
578         }
579         while((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));
580     }
581     else {
582         do {
583             len = sg_dma_len(sgl) >> PAGE_SHIFT;
584             Address = sg_phys(sgl);
585             for(i=0; i<len; i++) {
586                 Memory[mapped_size + i] = Address + (i << PAGE_SHIFT);
587             }
588             mapped_size += len;
589             sg_num += 1;
590         }
591         while((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));
592     }
593     return 0;
594 }
595 
596 
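/*
 * Build the MMU page tables for a bitblt: size the src and dst buffers,
 * reserve space in the ring buffer, map each buffer (ION sg table, pinned
 * user pages, or kernel physical pages), then rewrite the addresses in the
 * request so they index into the new page table.
 */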
597 static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
598 {
599     int SrcMemSize, DstMemSize;
600     unsigned long SrcStart, DstStart;
601     uint32_t i;
602     uint32_t AllSize;
603     uint32_t *MMU_Base, *MMU_p, *MMU_Base_phys;
604     int ret;
605     int status;
606     uint32_t uv_size, v_size;
607 
608     struct page **pages = NULL;
609 
610     MMU_Base = NULL;
611 
612     SrcMemSize = 0;
613     DstMemSize = 0;
614 
615     do {
616         /* cal src buf mmu info */
617         SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
618                                         req->src.format, req->src.vir_w, req->src.act_h + req->src.y_offset,
619                                         &SrcStart);
620         if(SrcMemSize == 0) {
621             return -EINVAL;
622         }
623 
624         /* cal dst buf mmu info */
625 
626         DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
627                                         req->dst.format, req->dst.vir_w, req->dst.vir_h,
628                                         &DstStart);
629         if(DstMemSize == 0)
630             return -EINVAL;
631 
632         /* Cal out the needed mem size */
633         SrcMemSize = (SrcMemSize + 15) & (~15);
634         DstMemSize = (DstMemSize + 15) & (~15);
635         AllSize = SrcMemSize + DstMemSize;
636 
637         if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {
638             pr_err("RGA Get MMU mem failed\n");
639             status = RGA_MALLOC_ERROR;
640             break;
641         }
642 
643         mutex_lock(&rga_service.lock);
644         MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
645         MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
646         mutex_unlock(&rga_service.lock);
647 
648         pages = rga_mmu_buf.pages;
649 
650         if((req->mmu_info.mmu_flag >> 8) & 1) {
651             if (req->sg_src) {
652                 ret = rga_MapION(req->sg_src, &MMU_Base[0], SrcMemSize, req->line_draw_info.flag);
653             }
654             else {
655                 ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
656                 if (ret < 0) {
657                     pr_err("rga map src memory failed\n");
658                     status = ret;
659                     break;
660                 }
661 
662 #if RGA_DEBUGFS
663 	if (RGA_CHECK_MODE)
664 		rga_usermemory_cheeck(&pages[0], req->src.vir_w,
665 				      req->src.vir_h, req->src.format, 1);
666 #endif
667             }
668         }
669         else {
670             MMU_p = MMU_Base;
671 
672             if(req->src.yrgb_addr == (unsigned long)rga_service.pre_scale_buf) {
673                 for(i=0; i<SrcMemSize; i++)
674                     MMU_p[i] = rga_service.pre_scale_buf[i];
675             }
676             else {
677                 for(i=0; i<SrcMemSize; i++)
678                     MMU_p[i] = (uint32_t)((SrcStart + i) << PAGE_SHIFT);
679             }
680         }
681 
682         if ((req->mmu_info.mmu_flag >> 10) & 1) {
683             if (req->sg_dst) {
684                 ret = rga_MapION(req->sg_dst, &MMU_Base[SrcMemSize], DstMemSize, req->line_draw_info.line_width);
685             }
686             else {
687                 ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
688                 if (ret < 0) {
689                     pr_err("rga map dst memory failed\n");
690                     status = ret;
691                     break;
692                 }
693 
694 #if RGA_DEBUGFS
695 	if (RGA_CHECK_MODE)
696 		rga_usermemory_cheeck(&pages[SrcMemSize], req->dst.vir_w,
697 				      req->dst.vir_h, req->dst.format, 2);
698 #endif
699             }
700         }
701         else {
702             MMU_p = MMU_Base + SrcMemSize;
703             for(i=0; i<DstMemSize; i++)
704                 MMU_p[i] = (uint32_t)((DstStart + i) << PAGE_SHIFT);
705         }
706 
707         MMU_Base[AllSize] = MMU_Base[AllSize-1];
708 
709         /* zsq
710          * change the buf address in req struct
711          */
712 
713         req->mmu_info.base_addr = (unsigned long)MMU_Base_phys >> 2;
714 
715         uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
716         v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
717 
718         req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
719         req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
720         req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);
721 
722         uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
723 
724         req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);
725         req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);
726 
727         /* flush data to DDR */
728         rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
729 
730         rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);
731         reg->MMU_len = AllSize + 16;
732 
733         status = 0;
734 
735         return status;
736     }
737     while(0);
738 
739     return status;
740 }
741 
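/*
 * Same idea as the bitblt path, but for palette conversion: the command
 * buffer, the palette-indexed source and the destination are all mapped into
 * one page table, with the source stride derived from the palette bit depth.
 */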
742 static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *req)
743 {
744     int SrcMemSize, DstMemSize, CMDMemSize;
745     unsigned long SrcStart, DstStart, CMDStart;
746     struct page **pages = NULL;
747     uint32_t i;
748     uint32_t AllSize;
749     uint32_t *MMU_Base = NULL, *MMU_Base_phys = NULL;
750     uint32_t *MMU_p;
751     int ret, status = 0;
752     uint32_t stride;
753 
754     uint8_t shift;
755     uint16_t sw, byte_num;
756 
757     shift = 3 - (req->palette_mode & 3);
758     sw = req->src.vir_w;
759     byte_num = sw >> shift;
760     stride = (byte_num + 3) & (~3);
761 
762     do {
763         SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);
764         if(SrcMemSize == 0) {
765             return -EINVAL;
766         }
767 
768         DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
769                                         req->dst.format, req->dst.vir_w, req->dst.vir_h,
770                                         &DstStart);
771         if(DstMemSize == 0) {
772             return -EINVAL;
773         }
774 
775         CMDMemSize = rga_mem_size_cal((unsigned long)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
776         if(CMDMemSize == 0) {
777             return -EINVAL;
778         }
779 
780         SrcMemSize = (SrcMemSize + 15) & (~15);
781         DstMemSize = (DstMemSize + 15) & (~15);
782         CMDMemSize = (CMDMemSize + 15) & (~15);
783 
784         AllSize = SrcMemSize + DstMemSize + CMDMemSize;
785 
786         if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {
787             pr_err("RGA Get MMU mem failed\n");
788             status = RGA_MALLOC_ERROR;
789             break;
790         }
791 
792         mutex_lock(&rga_service.lock);
793         MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
794         MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
795         mutex_unlock(&rga_service.lock);
796 
797         pages = rga_mmu_buf.pages;
798 
799         /* map CMD addr */
800         for(i=0; i<CMDMemSize; i++) {
801             MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i)<<PAGE_SHIFT));
802         }
803 
804         /* map src addr */
805         if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
806             ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
807             if (ret < 0) {
808                 pr_err("rga map src memory failed\n");
809                 status = ret;
810                 break;
811             }
812         }
813         else {
814             MMU_p = MMU_Base + CMDMemSize;
815 
816             for(i=0; i<SrcMemSize; i++)
817             {
818                 MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
819             }
820         }
821 
822         /* map dst addr */
823         if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
824             ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
825             if (ret < 0) {
826                 pr_err("rga map dst memory failed\n");
827                 status = ret;
828                 break;
829             }
830         }
831         else {
832             MMU_p = MMU_Base + CMDMemSize + SrcMemSize;
833             for(i=0; i<DstMemSize; i++)
834                 MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
835         }
836 
837 
838         /* zsq
839          * Rewrite the buffer addresses in the req struct so that, as seen
840          * through the MMU, they become page-table indices plus the in-page offset.
841          */
842         req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);
843         req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
844         req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);
845 
846         /*record the malloc buf for the cmd end to release*/
847         reg->MMU_base = MMU_Base;
848 
849         /* flush data to DDR */
850         rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
851 
852         rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);
853         reg->MMU_len = AllSize + 16;
854 
855         return status;
856 
857     }
858     while(0);
859 
860     return 0;
861 }
862 
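/*
 * Color fill only touches the destination, so a single mapping (ION, user or
 * kernel memory) is built and the dst address is rewritten to point into it.
 */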
863 static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req)
864 {
865     int DstMemSize;
866     unsigned long DstStart;
867     struct page **pages = NULL;
868     uint32_t i;
869     uint32_t AllSize;
870     uint32_t *MMU_Base, *MMU_p, *MMU_Base_phys;
871     int ret;
872     int status;
873 
874     MMU_Base = NULL;
875 
876     do {
877         DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
878                                         req->dst.format, req->dst.vir_w, req->dst.vir_h,
879                                         &DstStart);
880         if(DstMemSize == 0) {
881             return -EINVAL;
882         }
883 
884         AllSize = (DstMemSize + 15) & (~15);
885 
886         pages = rga_mmu_buf.pages;
887 
888         if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {
889             pr_err("RGA Get MMU mem failed\n");
890             status = RGA_MALLOC_ERROR;
891             break;
892         }
893 
894         mutex_lock(&rga_service.lock);
895         MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
896         MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
897         mutex_unlock(&rga_service.lock);
898 
899         if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {
900             if (req->sg_dst) {
901                 ret = rga_MapION(req->sg_dst, &MMU_Base[0], DstMemSize, req->line_draw_info.line_width);
902             }
903             else {
904                 ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);
905                 if (ret < 0) {
906                     pr_err("rga map dst memory failed\n");
907                     status = ret;
908                     break;
909                 }
910             }
911         }
912         else {
913             MMU_p = MMU_Base;
914             for(i=0; i<DstMemSize; i++)
915                 MMU_p[i] = (uint32_t)((DstStart + i) << PAGE_SHIFT);
916         }
917 
918         MMU_Base[AllSize] = MMU_Base[AllSize - 1];
919 
920         /* zsq
921          * change the buf address in req struct
922          */
923 
924         req->mmu_info.base_addr = ((unsigned long)(MMU_Base_phys)>>2);
925         req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));
926 
927         /*record the malloc buf for the cmd end to release*/
928         reg->MMU_base = MMU_Base;
929 
930         /* flush data to DDR */
931         rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
932 
933         rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);
934         reg->MMU_len = AllSize + 16;
935 
936         return 0;
937     }
938     while(0);
939 
940     return status;
941 }
942 
943 
944 static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_req *req)
945 {
946     return 0;
947 }
948 
949 static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_req *req)
950 {
951     return 0;
952 }
953 
954 
955 
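/*
 * Pre-scale mode maps src and dst much like the bitblt path; the destination
 * may be the driver's own pre_scale_buf, in which case its page-table entries
 * are copied directly instead of being translated from a virtual address.
 */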
956 static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
957 {
958     int SrcMemSize, DstMemSize;
959     unsigned long SrcStart, DstStart;
960     struct page **pages = NULL;
961     uint32_t i;
962     uint32_t AllSize;
963     uint32_t *MMU_Base, *MMU_p, *MMU_Base_phys;
964     int ret;
965     int status;
966     uint32_t uv_size, v_size;
967 
968     MMU_Base = NULL;
969 
970     do {
971         /* cal src buf mmu info */
972         SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
973                                         req->src.format, req->src.vir_w, req->src.vir_h,
974                                         &SrcStart);
975         if(SrcMemSize == 0) {
976             return -EINVAL;
977         }
978 
979         /* cal dst buf mmu info */
980         DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
981                                         req->dst.format, req->dst.vir_w, req->dst.vir_h,
982                                         &DstStart);
983         if(DstMemSize == 0) {
984             return -EINVAL;
985         }
986 
987 	    SrcMemSize = (SrcMemSize + 15) & (~15);
988 	    DstMemSize = (DstMemSize + 15) & (~15);
989 
990         AllSize = SrcMemSize + DstMemSize;
991 
992         pages = rga_mmu_buf.pages;
993 
994         if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {
995             pr_err("RGA Get MMU mem failed\n");
996             status = RGA_MALLOC_ERROR;
997             break;
998         }
999 
1000         mutex_lock(&rga_service.lock);
1001         MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
1002         MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
1003         mutex_unlock(&rga_service.lock);
1004 
1005         /* map src pages */
1006         if ((req->mmu_info.mmu_flag >> 8) & 1) {
1007             if (req->sg_src) {
1008                 ret = rga_MapION(req->sg_src, &MMU_Base[0], SrcMemSize,req->line_draw_info.flag);
1009             }
1010             else {
1011                 ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
1012                 if (ret < 0) {
1013                     pr_err("rga map src memory failed\n");
1014                     status = ret;
1015                     break;
1016                 }
1017             }
1018         }
1019         else {
1020             MMU_p = MMU_Base;
1021 
1022             for(i=0; i<SrcMemSize; i++)
1023                 MMU_p[i] = (uint32_t)((SrcStart + i) << PAGE_SHIFT);
1024         }
1025 
1026         if((req->mmu_info.mmu_flag >> 10) & 1) {
1027             if (req->sg_dst) {
1028                 ret = rga_MapION(req->sg_dst, &MMU_Base[SrcMemSize], DstMemSize, req->line_draw_info.line_width);
1029             }
1030             else {
1031                 ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
1032                 if (ret < 0) {
1033                     pr_err("rga map dst memory failed\n");
1034                     status = ret;
1035                     break;
1036                 }
1037             }
1038         }
1039         else
1040         {
1041             /* kernel space */
1042             MMU_p = MMU_Base + SrcMemSize;
1043 
1044             if(req->dst.yrgb_addr == (unsigned long)rga_service.pre_scale_buf) {
1045                 for(i=0; i<DstMemSize; i++)
1046                     MMU_p[i] = rga_service.pre_scale_buf[i];
1047             }
1048             else {
1049                 for(i=0; i<DstMemSize; i++)
1050                     MMU_p[i] = (uint32_t)((DstStart + i) << PAGE_SHIFT);
1051             }
1052         }
1053 
1054         MMU_Base[AllSize] = MMU_Base[AllSize - 1];	/* duplicate the last entry as a guard, as in the other modes */
1055 
1056         /* zsq
1057          * Rewrite the buffer addresses in the req struct so that, as seen
1058          * through the MMU, they become page-table indices plus the in-page offset.
1059          */
1060 
1061         req->mmu_info.base_addr = ((unsigned long)(MMU_Base_phys)>>2);
1062 
1063         uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1064         v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1065 
1066         req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
1067         req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
1068         req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);
1069 
1070         uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1071         v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
1072 
1073         req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((SrcMemSize) << PAGE_SHIFT);
1074         req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);
1075         req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((SrcMemSize + v_size) << PAGE_SHIFT);
1076 
1077         /*record the malloc buf for the cmd end to release*/
1078         reg->MMU_base = MMU_Base;
1079 
1080         /* flush data to DDR */
1081         rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
1082 
1083 	    rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);
1084         reg->MMU_len = AllSize + 16;
1085 
1086         return 0;
1087     }
1088     while(0);
1089 
1090     return status;
1091 }
1092 
1093 
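/*
 * Map the command buffer and the user-supplied palette table. Unlike the
 * blit paths this allocates its page table with kzalloc() rather than from
 * the shared ring buffer.
 */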
1094 static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rga_req *req)
1095 {
1096     int SrcMemSize, CMDMemSize;
1097     unsigned long SrcStart, CMDStart;
1098     struct page **pages = NULL;
1099     uint32_t i;
1100     uint32_t AllSize;
1101     uint32_t *MMU_Base, *MMU_p;
1102     int ret, status;
1103 
1104     MMU_Base = NULL;
1105 
1106     do {
1107         /* cal src buf mmu info */
1108         SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, req->src.vir_w * req->src.vir_h, &SrcStart);
1109         if(SrcMemSize == 0) {
1110             return -EINVAL;
1111         }
1112 
1113         /* cal cmd buf mmu info */
1114         CMDMemSize = rga_mem_size_cal((unsigned long)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
1115         if(CMDMemSize == 0) {
1116             return -EINVAL;
1117         }
1118 
1119         AllSize = SrcMemSize + CMDMemSize;
1120 
1121         pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
1122         if(pages == NULL) {
1123             pr_err("RGA MMU malloc pages mem failed\n");
1124             status = RGA_MALLOC_ERROR;
1125             break;
1126         }
1127 
1128         MMU_Base = kzalloc((AllSize + 1)* sizeof(uint32_t), GFP_KERNEL);
1129         if(MMU_Base == NULL) {
1130             pr_err("RGA MMU malloc MMU_Base point failed\n");
1131             status = RGA_MALLOC_ERROR;
1132             break;
1133         }
1134 
1135         for(i=0; i<CMDMemSize; i++) {
1136             MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
1137         }
1138 
1139         if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
1140         {
1141             ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
1142             if (ret < 0) {
1143                 pr_err("rga map src memory failed\n");
1144                 return -EINVAL;
1145             }
1146         }
1147         else
1148         {
1149             MMU_p = MMU_Base + CMDMemSize;
1150 
1151                 for(i=0; i<SrcMemSize; i++)
1152                 {
1153                     MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
1154                 }
1155         }
1156 
1157         /* zsq
1158          * Rewrite the buffer addresses in the req struct so that, as seen
1159          * through the MMU, they become page-table indices plus the in-page offset.
1160          */
1161         req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
1162 
1163         req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
1164 
1165         /*record the malloc buf for the cmd end to release*/
1166         reg->MMU_base = MMU_Base;
1167 
1168         /* flush data to DDR */
1169         rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize));
1170 
1171 
1172         if (pages != NULL) {
1173             /* Free the page table */
1174             kfree(pages);
1175         }
1176 
1177         return 0;
1178     }
1179     while(0);
1180 
1181     if (pages != NULL)
1182         kfree(pages);
1183 
1184     if (MMU_Base != NULL)
1185         kfree(MMU_Base);
1186 
1187     return status;
1188 }
1189 
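/*
 * Map the command buffer and the pattern buffer (pat.yrgb_addr, sized here as
 * vir_w * vir_h * 4 bytes); like the palette-table path it uses kzalloc()
 * for the page table.
 */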
1190 static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_req *req)
1191 {
1192     int SrcMemSize, CMDMemSize;
1193     unsigned long SrcStart, CMDStart;
1194     struct page **pages = NULL;
1195     uint32_t i;
1196     uint32_t AllSize;
1197     uint32_t *MMU_Base, *MMU_p;
1198     int ret, status;
1199 
1200     MMU_Base = MMU_p = 0;
1201 
1202     do
1203     {
1204 
1205         /* cal src buf mmu info */
1206         SrcMemSize = rga_mem_size_cal(req->pat.yrgb_addr, req->pat.vir_w * req->pat.vir_h * 4, &SrcStart);
1207         if(SrcMemSize == 0) {
1208             return -EINVAL;
1209         }
1210 
1211         /* cal cmd buf mmu info */
1212         CMDMemSize = rga_mem_size_cal((unsigned long)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
1213         if(CMDMemSize == 0) {
1214             return -EINVAL;
1215         }
1216 
1217         AllSize = SrcMemSize + CMDMemSize;
1218 
1219         pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
1220         if(pages == NULL) {
1221             pr_err("RGA MMU malloc pages mem failed\n");
1222             status = RGA_MALLOC_ERROR;
1223             break;
1224         }
1225 
1226         MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
1227         if(MMU_Base == NULL) {
1228             pr_err("RGA MMU malloc MMU_Base point failed\n");
1229             status = RGA_MALLOC_ERROR;
1230             break;
1231         }
1232 
1233         for(i=0; i<CMDMemSize; i++) {
1234             MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
1235         }
1236 
1237         if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
1238         {
1239             ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
1240             if (ret < 0) {
1241                 pr_err("rga map src memory failed\n");
1242                 status = ret;
1243                 break;
1244             }
1245         }
1246         else
1247         {
1248             MMU_p = MMU_Base + CMDMemSize;
1249 
1250             for(i=0; i<SrcMemSize; i++)
1251             {
1252                 MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
1253             }
1254         }
1255 
1256         /* zsq
1257          * Rewrite the buffer addresses in the req struct so that, as seen
1258          * through the MMU, they become page-table indices plus the in-page offset.
1259          */
1260         req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
1261 
1262         req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
1263 
1264         /*record the malloc buf for the cmd end to release*/
1265         reg->MMU_base = MMU_Base;
1266 
1267         /* flush data to DDR */
1268         rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize));
1269 
1270         if (pages != NULL) {
1271             /* Free the page table */
1272             kfree(pages);
1273         }
1274 
1275         return 0;
1276 
1277     }
1278     while(0);
1279 
1280     if (pages != NULL)
1281         kfree(pages);
1282 
1283     if (MMU_Base != NULL)
1284         kfree(MMU_Base);
1285 
1286     return status;
1287 }
1288 
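/*
 * Entry point: dispatch to the per-render-mode page-table builder above and
 * return its status (-1 for unknown render modes).
 */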
1289 int rga_set_mmu_info(struct rga_reg *reg, struct rga_req *req)
1290 {
1291     int ret;
1292 
1293     switch (req->render_mode) {
1294         case bitblt_mode :
1295             ret = rga_mmu_info_BitBlt_mode(reg, req);
1296             break;
1297         case color_palette_mode :
1298             ret = rga_mmu_info_color_palette_mode(reg, req);
1299             break;
1300         case color_fill_mode :
1301             ret = rga_mmu_info_color_fill_mode(reg, req);
1302             break;
1303         case line_point_drawing_mode :
1304             ret = rga_mmu_info_line_point_drawing_mode(reg, req);
1305             break;
1306         case blur_sharp_filter_mode :
1307             ret = rga_mmu_info_blur_sharp_filter_mode(reg, req);
1308             break;
1309         case pre_scaling_mode :
1310             ret = rga_mmu_info_pre_scale_mode(reg, req);
1311             break;
1312         case update_palette_table_mode :
1313             ret = rga_mmu_info_update_palette_table_mode(reg, req);
1314             break;
1315         case update_patten_buff_mode :
1316             ret = rga_mmu_info_update_patten_buff_mode(reg, req);
1317             break;
1318         default :
1319             ret = -1;
1320             break;
1321     }
1322 
1323     return ret;
1324 }
1325 
1326