xref: /OK3568_Linux_fs/kernel/drivers/video/rockchip/rga3/rga_mm.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) Rockchip Electronics Co., Ltd.
4  *
5  * Author: Cerf Yu <cerf.yu@rock-chips.com>
6  */
7 
8 #define pr_fmt(fmt) "rga_mm: " fmt
9 
10 #include "rga.h"
11 #include "rga_job.h"
12 #include "rga_mm.h"
13 #include "rga_dma_buf.h"
14 #include "rga_common.h"
15 #include "rga_iommu.h"
16 #include "rga_hw_config.h"
17 #include "rga_debugger.h"
18 
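/*
 * Overview (editorial note, derived from the code below): this file wraps
 * external buffers (dma-buf fd, dma-buf pointer, userspace virtual address
 * or physical address) into struct rga_internal_buffer objects, maps them
 * according to the scheduler's MMU type (RGA_IOMMU, RGA_MMU or none),
 * tracks them in an IDR keyed by handle, and flushes caches where required.
 */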
19 static void rga_current_mm_read_lock(struct mm_struct *mm)
20 {
21 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
22 	mmap_read_lock(mm);
23 #else
24 	down_read(&mm->mmap_sem);
25 #endif
26 }
27 
28 static void rga_current_mm_read_unlock(struct mm_struct *mm)
29 {
30 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
31 	mmap_read_unlock(mm);
32 #else
33 	up_read(&mm->mmap_sem);
34 #endif
35 }
36 
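/*
 * Fallback path used when get_user_pages() cannot pin every page: walk the
 * process page tables (pgd -> p4d -> pud -> pmd -> pte) by hand and take
 * the struct page from the pte's pfn. Note that, unlike get_user_pages(),
 * this path does not take an extra reference on the pages.
 */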
37 static int rga_get_user_pages_from_vma(struct page **pages, unsigned long Memory,
38 				       uint32_t pageCount, struct mm_struct *current_mm)
39 {
40 	int ret = 0;
41 	int i;
42 	struct vm_area_struct *vma;
43 	spinlock_t *ptl;
44 	pte_t *pte;
45 	pgd_t *pgd;
46 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
47 	p4d_t *p4d;
48 #endif
49 	pud_t *pud;
50 	pmd_t *pmd;
51 	unsigned long pfn;
52 
53 	for (i = 0; i < pageCount; i++) {
54 		vma = find_vma(current_mm, (Memory + i) << PAGE_SHIFT);
55 		if (!vma) {
56 			pr_err("page[%d] failed to get vma\n", i);
57 			ret = RGA_OUT_OF_RESOURCES;
58 			break;
59 		}
60 
61 		pgd = pgd_offset(current_mm, (Memory + i) << PAGE_SHIFT);
62 		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) {
63 			pr_err("page[%d] failed to get pgd\n", i);
64 			ret = RGA_OUT_OF_RESOURCES;
65 			break;
66 		}
67 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
68 		/*
69 		 * On kernels with a four-level page table, p4d_offset()
70 		 * is a no-op here and simply returns the pgd entry.
71 		 */
72 		p4d = p4d_offset(pgd, (Memory + i) << PAGE_SHIFT);
73 		if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d))) {
74 			pr_err("page[%d] failed to get p4d\n", i);
75 			ret = RGA_OUT_OF_RESOURCES;
76 			break;
77 		}
78 
79 		pud = pud_offset(p4d, (Memory + i) << PAGE_SHIFT);
80 #else
81 		pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);
82 #endif
83 
84 		if (pud_none(*pud) || unlikely(pud_bad(*pud))) {
85 			pr_err("page[%d] failed to get pud\n", i);
86 			ret = RGA_OUT_OF_RESOURCES;
87 			break;
88 		}
89 		pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
90 		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
91 			pr_err("page[%d] failed to get pmd\n", i);
92 			ret = RGA_OUT_OF_RESOURCES;
93 			break;
94 		}
95 		pte = pte_offset_map_lock(current_mm, pmd,
96 					  (Memory + i) << PAGE_SHIFT, &ptl);
97 		if (pte_none(*pte)) {
98 			pr_err("page[%d] failed to get pte\n", i);
99 			pte_unmap_unlock(pte, ptl);
100 			ret = RGA_OUT_OF_RESOURCES;
101 			break;
102 		}
103 
104 		pfn = pte_pfn(*pte);
105 		pages[i] = pfn_to_page(pfn);
106 		pte_unmap_unlock(pte, ptl);
107 	}
108 
109 	if (ret == RGA_OUT_OF_RESOURCES && i > 0)
110 		pr_err("Only got %d bytes from vma, but the current image requires %d bytes\n",
111 		       (int)(i * PAGE_SIZE), (int)(pageCount * PAGE_SIZE));
112 
113 	return ret;
114 }
115 
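/*
 * Pin the pages backing a userspace range. get_user_pages()/
 * get_user_pages_remote() (signature depends on the kernel version) is
 * tried first; if it cannot pin the whole range, the page-table walk above
 * is used instead. A positive return value is the number of pages pinned
 * by get_user_pages() that must later be released with put_page(); the
 * fallback path returns 0 and its pages carry no extra reference.
 */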
116 static int rga_get_user_pages(struct page **pages, unsigned long Memory,
117 			      uint32_t pageCount, int writeFlag,
118 			      struct mm_struct *current_mm)
119 {
120 	uint32_t i;
121 	int32_t ret = 0;
122 	int32_t result;
123 
124 	rga_current_mm_read_lock(current_mm);
125 
126 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 168) && \
127     LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
128 	result = get_user_pages(current, current_mm, Memory << PAGE_SHIFT,
129 				pageCount, writeFlag ? FOLL_WRITE : 0,
130 				pages, NULL);
131 #elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
132 	result = get_user_pages(current, current_mm, Memory << PAGE_SHIFT,
133 				pageCount, writeFlag ? FOLL_WRITE : 0, 0, pages, NULL);
134 #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
135 	result = get_user_pages_remote(current, current_mm,
136 				       Memory << PAGE_SHIFT,
137 				       pageCount, writeFlag ? FOLL_WRITE : 0, pages, NULL, NULL);
138 #else
139 	result = get_user_pages_remote(current_mm, Memory << PAGE_SHIFT,
140 				       pageCount, writeFlag ? FOLL_WRITE : 0, pages, NULL, NULL);
141 #endif
142 
143 	if (result > 0 && result >= pageCount) {
144 		ret = result;
145 	} else {
146 		if (result > 0)
147 			for (i = 0; i < result; i++)
148 				put_page(pages[i]);
149 
150 		ret = rga_get_user_pages_from_vma(pages, Memory, pageCount, current_mm);
151 		if (ret < 0 && result > 0) {
152 			pr_err("Only got %d bytes from user pages, but the current image requires %d bytes\n",
153 			       (int)(result * PAGE_SIZE), (int)(pageCount * PAGE_SIZE));
154 		}
155 	}
156 
157 	rga_current_mm_read_unlock(current_mm);
158 
159 	return ret;
160 }
161 
162 static void rga_free_sgt(struct sg_table **sgt_ptr)
163 {
164 	if (sgt_ptr == NULL || *sgt_ptr == NULL)
165 		return;
166 
167 	sg_free_table(*sgt_ptr);
168 	kfree(*sgt_ptr);
169 	*sgt_ptr = NULL;
170 }
171 
172 static struct sg_table *rga_alloc_sgt(struct rga_virt_addr *virt_addr)
173 {
174 	int ret;
175 	struct sg_table *sgt = NULL;
176 
177 	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
178 	if (sgt == NULL) {
179 		pr_err("%s alloc sgt error!\n", __func__);
180 		return ERR_PTR(-ENOMEM);
181 	}
182 
183 	/* Get the sg table from the pinned pages. */
184 	/* iova requires minimum page alignment, so sgt cannot have offset */
185 	ret = sg_alloc_table_from_pages(sgt,
186 					virt_addr->pages,
187 					virt_addr->page_count,
188 					0,
189 					virt_addr->size,
190 					GFP_KERNEL);
191 	if (ret) {
192 		pr_err("sg_alloc_table_from_pages failed\n");
193 		goto out_free_sgt;
194 	}
195 
196 	return sgt;
197 
198 out_free_sgt:
199 	kfree(sgt);
200 
201 	return ERR_PTR(ret);
202 }
203 
204 static void rga_free_virt_addr(struct rga_virt_addr **virt_addr_p)
205 {
206 	int i;
207 	struct rga_virt_addr *virt_addr = NULL;
208 
209 	if (virt_addr_p == NULL)
210 		return;
211 
212 	virt_addr = *virt_addr_p;
213 	if (virt_addr == NULL)
214 		return;
215 
216 	for (i = 0; i < virt_addr->result; i++)
217 		put_page(virt_addr->pages[i]);
218 
219 	free_pages((unsigned long)virt_addr->pages, virt_addr->pages_order);
220 	kfree(virt_addr);
221 	*virt_addr_p = NULL;
222 }
223 
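/*
 * Build a struct rga_virt_addr for a userspace buffer: derive the image
 * size from memory_parm (explicit size or width/height/format), page-align
 * it together with the sub-page offset, allocate the page-pointer array
 * with __get_free_pages() and pin the user pages.
 */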
224 static int rga_alloc_virt_addr(struct rga_virt_addr **virt_addr_p,
225 			       uint64_t viraddr,
226 			       struct rga_memory_parm *memory_parm,
227 			       int writeFlag,
228 			       struct mm_struct *mm)
229 {
230 	int i;
231 	int ret;
232 	int result = 0;
233 	int order;
234 	unsigned int count;
235 	int img_size;
236 	size_t offset;
237 	unsigned long size;
238 	struct page **pages = NULL;
239 	struct rga_virt_addr *virt_addr = NULL;
240 
241 	if (memory_parm->size)
242 		img_size = memory_parm->size;
243 	else
244 		img_size = rga_image_size_cal(memory_parm->width,
245 					      memory_parm->height,
246 					      memory_parm->format,
247 					      NULL, NULL, NULL);
248 
249 	offset = viraddr & (~PAGE_MASK);
250 	count = RGA_GET_PAGE_COUNT(img_size + offset);
251 	size = count * PAGE_SIZE;
252 	if (!size) {
253 		pr_err("failed to calculate buffer size! size = %ld, count = %d, offset = %ld\n",
254 		       size, count, (unsigned long)offset);
255 		rga_dump_memory_parm(memory_parm);
256 		return -EFAULT;
257 	}
258 
259 	/* alloc pages and page_table */
260 	order = get_order(count * sizeof(struct page *));
261 	if (order >= MAX_ORDER) {
262 		pr_err("Can not alloc pages with order[%d] for viraddr pages, max_order = %d\n",
263 		       order, MAX_ORDER);
264 		return -ENOMEM;
265 	}
266 
267 	pages = (struct page **)__get_free_pages(GFP_KERNEL, order);
268 	if (pages == NULL) {
269 		pr_err("%s can not alloc pages for viraddr pages\n", __func__);
270 		return -ENOMEM;
271 	}
272 
273 	/* get pages from virtual address. */
274 	ret = rga_get_user_pages(pages, viraddr >> PAGE_SHIFT, count, writeFlag, mm);
275 	if (ret < 0) {
276 		pr_err("failed to get pages from virtual address: 0x%lx\n",
277 		       (unsigned long)viraddr);
278 		ret = -EINVAL;
279 		goto out_free_pages;
280 	} else if (ret > 0) {
281 		/* Number of pages actually pinned; needed when putting them back. */
282 		result = ret;
283 	}
284 
285 	*virt_addr_p = kzalloc(sizeof(struct rga_virt_addr), GFP_KERNEL);
286 	if (*virt_addr_p == NULL) {
287 		pr_err("%s alloc virt_addr error!\n", __func__);
288 		ret = -ENOMEM;
289 		goto out_put_and_free_pages;
290 	}
291 	virt_addr = *virt_addr_p;
292 
293 	virt_addr->addr = viraddr;
294 	virt_addr->pages = pages;
295 	virt_addr->pages_order = order;
296 	virt_addr->page_count = count;
297 	virt_addr->size = size;
298 	virt_addr->offset = offset;
299 	virt_addr->result = result;
300 
301 	return 0;
302 
303 out_put_and_free_pages:
304 	for (i = 0; i < result; i++)
305 		put_page(pages[i]);
306 out_free_pages:
307 	free_pages((unsigned long)pages, order);
308 
309 	return ret;
310 }
311 
312 static inline bool rga_mm_check_memory_limit(struct rga_scheduler_t *scheduler, int mm_flag)
313 {
314 	if (!scheduler)
315 		return false;
316 
317 	if (scheduler->data->mmu == RGA_MMU &&
318 	    !(mm_flag & RGA_MEM_UNDER_4G)) {
319 		pr_err("%s unsupported memory larger than 4G!\n",
320 		       rga_get_mmu_type_str(scheduler->data->mmu));
321 		return false;
322 	}
323 
324 	return true;
325 }
326 
327 /* If it is within 0~4G, return 1 (true). */
328 static int rga_mm_check_range_sgt(struct sg_table *sgt)
329 {
330 	int i;
331 	struct scatterlist *sg;
332 	phys_addr_t s_phys = 0;
333 
334 	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
335 		s_phys = sg_phys(sg);
336 		if ((s_phys > 0xffffffff) || (s_phys + sg->length > 0xffffffff))
337 			return 0;
338 	}
339 
340 	return 1;
341 }
342 
343 static inline int rga_mm_check_range_phys_addr(phys_addr_t paddr, size_t size)
344 {
345 	return ((paddr + size) <= 0xffffffff);
346 }
347 
348 static inline bool rga_mm_check_contiguous_sgt(struct sg_table *sgt)
349 {
350 	if (sgt->orig_nents == 1)
351 		return true;
352 
353 	return false;
354 }
355 
356 static void rga_mm_unmap_dma_buffer(struct rga_internal_buffer *internal_buffer)
357 {
358 	if (rga_mm_is_invalid_dma_buffer(internal_buffer->dma_buffer))
359 		return;
360 
361 	rga_dma_unmap_buf(internal_buffer->dma_buffer);
362 
363 	if (internal_buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS &&
364 	    internal_buffer->phys_addr > 0)
365 		internal_buffer->phys_addr = 0;
366 
367 	kfree(internal_buffer->dma_buffer);
368 	internal_buffer->dma_buffer = NULL;
369 }
370 
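/*
 * Import a dma-buf (given as an fd or as a struct dma_buf pointer), attach
 * and map it on the scheduler's mapping device, then record whether the
 * result lies below 4G and whether it is physically contiguous, since that
 * decides how the different RGA MMU variants can address it.
 */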
371 static int rga_mm_map_dma_buffer(struct rga_external_buffer *external_buffer,
372 				 struct rga_internal_buffer *internal_buffer,
373 				 struct rga_job *job)
374 {
375 	int ret;
376 	int ex_buffer_size;
377 	uint32_t mm_flag = 0;
378 	phys_addr_t phys_addr = 0;
379 	struct rga_dma_buffer *buffer;
380 	struct device *map_dev;
381 	struct rga_scheduler_t *scheduler;
382 
383 	scheduler = job ? job->scheduler :
384 		    rga_drvdata->scheduler[rga_drvdata->map_scheduler_index];
385 	if (scheduler == NULL) {
386 		pr_err("Invalid scheduler device!\n");
387 		return -EINVAL;
388 	}
389 
390 	if (external_buffer->memory_parm.size)
391 		ex_buffer_size = external_buffer->memory_parm.size;
392 	else
393 		ex_buffer_size = rga_image_size_cal(external_buffer->memory_parm.width,
394 						    external_buffer->memory_parm.height,
395 						    external_buffer->memory_parm.format,
396 						    NULL, NULL, NULL);
397 	if (ex_buffer_size <= 0) {
398 		pr_err("failed to calculate buffer size!\n");
399 		rga_dump_memory_parm(&external_buffer->memory_parm);
400 		return ex_buffer_size == 0 ? -EINVAL : ex_buffer_size;
401 	}
402 
403 	/*
404 	 * The dma-buf API must use the default_domain of the main device;
405 	 * devices without an iommu_info pointer map through their own,
406 	 * non-IOMMU device instead.
407 	 */
407 	map_dev = scheduler->iommu_info ? scheduler->iommu_info->default_dev : scheduler->dev;
408 
409 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
410 	if (buffer == NULL) {
411 		pr_err("%s alloc internal_buffer error!\n", __func__);
412 		return -ENOMEM;
413 	}
414 
415 	switch (external_buffer->type) {
416 	case RGA_DMA_BUFFER:
417 		ret = rga_dma_map_fd((int)external_buffer->memory,
418 				     buffer, DMA_BIDIRECTIONAL,
419 				     map_dev);
420 		break;
421 	case RGA_DMA_BUFFER_PTR:
422 		ret = rga_dma_map_buf((struct dma_buf *)u64_to_user_ptr(external_buffer->memory),
423 				      buffer, DMA_BIDIRECTIONAL,
424 				      map_dev);
425 		break;
426 	default:
427 		ret = -EFAULT;
428 		break;
429 	}
430 	if (ret < 0) {
431 		pr_err("%s core[%d] map dma buffer error!\n",
432 		       __func__, scheduler->core);
433 		goto free_buffer;
434 	}
435 
436 	if (buffer->size < ex_buffer_size) {
437 		pr_err("Only got %ld bytes from %s = 0x%lx, but the current image requires %d bytes\n",
438 		       buffer->size, rga_get_memory_type_str(external_buffer->type),
439 		       (unsigned long)external_buffer->memory, ex_buffer_size);
440 		rga_dump_memory_parm(&external_buffer->memory_parm);
441 		ret = -EINVAL;
442 		goto unmap_buffer;
443 	}
444 
445 	buffer->scheduler = scheduler;
446 
447 	if (rga_mm_check_range_sgt(buffer->sgt))
448 		mm_flag |= RGA_MEM_UNDER_4G;
449 
450 	/*
451 	 * If it's physically contiguous, then the RGA_MMU can
452 	 * directly use the physical address.
453 	 */
454 	if (rga_mm_check_contiguous_sgt(buffer->sgt)) {
455 		phys_addr = sg_phys(buffer->sgt->sgl);
456 		if (phys_addr == 0) {
457 			pr_err("%s get physical address error!\n", __func__);
			ret = -EFAULT;
458 			goto unmap_buffer;
459 		}
460 
461 		mm_flag |= RGA_MEM_PHYSICAL_CONTIGUOUS;
462 	}
463 
464 	if (!rga_mm_check_memory_limit(scheduler, mm_flag)) {
465 		pr_err("scheduler core[%d] unsupported mm_flag[0x%x]!\n",
466 		       scheduler->core, mm_flag);
467 		ret = -EINVAL;
468 		goto unmap_buffer;
469 	}
470 
471 	internal_buffer->dma_buffer = buffer;
472 	internal_buffer->mm_flag = mm_flag;
473 	internal_buffer->phys_addr = phys_addr ? phys_addr : 0;
474 
475 	return 0;
476 
477 unmap_buffer:
478 	rga_dma_unmap_buf(buffer);
479 
480 free_buffer:
481 	kfree(buffer);
482 
483 	return ret;
484 }
485 
486 static void rga_mm_unmap_virt_addr(struct rga_internal_buffer *internal_buffer)
487 {
488 	WARN_ON(internal_buffer->dma_buffer == NULL || internal_buffer->virt_addr == NULL);
489 
490 	if (rga_mm_is_invalid_dma_buffer(internal_buffer->dma_buffer))
491 		return;
492 
493 	switch (internal_buffer->dma_buffer->scheduler->data->mmu) {
494 	case RGA_IOMMU:
495 		rga_iommu_unmap(internal_buffer->dma_buffer);
496 		break;
497 	case RGA_MMU:
498 		dma_unmap_sg(internal_buffer->dma_buffer->scheduler->dev,
499 			     internal_buffer->dma_buffer->sgt->sgl,
500 			     internal_buffer->dma_buffer->sgt->orig_nents,
501 			     DMA_BIDIRECTIONAL);
502 		break;
503 	default:
504 		break;
505 	}
506 
507 	if (internal_buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS &&
508 	    internal_buffer->phys_addr > 0)
509 		internal_buffer->phys_addr = 0;
510 
511 	rga_free_sgt(&internal_buffer->dma_buffer->sgt);
512 
513 	kfree(internal_buffer->dma_buffer);
514 	internal_buffer->dma_buffer = NULL;
515 
516 	rga_free_virt_addr(&internal_buffer->virt_addr);
517 
518 	mmput(internal_buffer->current_mm);
519 	mmdrop(internal_buffer->current_mm);
520 	internal_buffer->current_mm = NULL;
521 }
522 
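/*
 * Map a userspace virtual address: take references on the owning mm
 * (mmgrab() + mmget()), pin the pages, build an sg_table and map it either
 * through the IOMMU (rga_iommu_map_sgt) or with dma_map_sg() for the
 * RGA_MMU case. Cache flushing is forced for this path because userspace
 * has no interface to flush these buffers itself.
 */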
523 static int rga_mm_map_virt_addr(struct rga_external_buffer *external_buffer,
524 				struct rga_internal_buffer *internal_buffer,
525 				struct rga_job *job, int write_flag)
526 {
527 	int ret;
528 	uint32_t mm_flag = 0;
529 	phys_addr_t phys_addr = 0;
530 	struct sg_table *sgt;
531 	struct rga_virt_addr *virt_addr;
532 	struct rga_dma_buffer *buffer;
533 	struct rga_scheduler_t *scheduler;
534 
535 	scheduler = job ? job->scheduler :
536 		    rga_drvdata->scheduler[rga_drvdata->map_scheduler_index];
537 	if (scheduler == NULL) {
538 		pr_err("Invalid scheduler device!\n");
539 		return -EINVAL;
540 	}
541 
542 	internal_buffer->current_mm = job ? job->mm : current->mm;
543 	if (internal_buffer->current_mm == NULL) {
544 		pr_err("%s, cannot get current mm!\n", __func__);
545 		return -EFAULT;
546 	}
547 	mmgrab(internal_buffer->current_mm);
548 	mmget(internal_buffer->current_mm);
549 
550 	ret = rga_alloc_virt_addr(&virt_addr,
551 				  external_buffer->memory,
552 				  &internal_buffer->memory_parm,
553 				  write_flag, internal_buffer->current_mm);
554 	if (ret < 0) {
555 		pr_err("Can not alloc rga_virt_addr from 0x%lx\n",
556 		       (unsigned long)external_buffer->memory);
557 		goto put_current_mm;
558 	}
559 
560 	sgt = rga_alloc_sgt(virt_addr);
561 	if (IS_ERR(sgt)) {
562 		pr_err("alloc sgt error!\n");
563 		ret = PTR_ERR(sgt);
564 		goto free_virt_addr;
565 	}
566 
567 	if (rga_mm_check_range_sgt(sgt))
568 		mm_flag |= RGA_MEM_UNDER_4G;
569 
570 	if (rga_mm_check_contiguous_sgt(sgt)) {
571 		phys_addr = sg_phys(sgt->sgl);
572 		if (phys_addr == 0) {
573 			pr_err("%s get physical address error!\n", __func__);
			ret = -EFAULT;
574 			goto free_sgt;
575 		}
576 
577 		mm_flag |= RGA_MEM_PHYSICAL_CONTIGUOUS;
578 	}
579 
580 	/*
581 	 * Some userspace virtual addresses do not have an
582 	 * interface for flushing the cache, so it is mandatory
583 	 * to flush the cache when the virtual address is used.
584 	 */
585 	mm_flag |= RGA_MEM_FORCE_FLUSH_CACHE;
586 
587 	if (!rga_mm_check_memory_limit(scheduler, mm_flag)) {
588 		pr_err("scheduler core[%d] unsupported mm_flag[0x%x]!\n",
589 		       scheduler->core, mm_flag);
590 		ret = -EINVAL;
591 		goto free_sgt;
592 	}
593 
594 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
595 	if (buffer == NULL) {
596 		pr_err("%s alloc internal dma_buffer error!\n", __func__);
597 		ret = -ENOMEM;
598 		goto free_sgt;
599 	}
600 
601 	switch (scheduler->data->mmu) {
602 	case RGA_IOMMU:
603 		ret = rga_iommu_map_sgt(sgt, virt_addr->size, buffer, scheduler->dev);
604 		if (ret < 0) {
605 			pr_err("%s core[%d] iommu_map virtual address error!\n",
606 			       __func__, scheduler->core);
607 			goto free_dma_buffer;
608 		}
609 		break;
610 	case RGA_MMU:
611 		ret = dma_map_sg(scheduler->dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL);
612 		if (ret == 0) {
613 			pr_err("%s core[%d] dma_map_sgt error! va = 0x%lx, nents = %d\n",
614 				__func__, scheduler->core,
615 				(unsigned long)virt_addr->addr, sgt->orig_nents);
616 			ret = -EINVAL;
617 			goto free_dma_buffer;
618 		}
619 		break;
620 	default:
621 		if (mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS)
622 			break;
623 
624 		pr_err("Current %s[%d] cannot support virtual address!\n",
625 		       rga_get_mmu_type_str(scheduler->data->mmu), scheduler->data->mmu);
		ret = -EINVAL;
626 		goto free_dma_buffer;
627 	}
628 
629 	buffer->sgt = sgt;
630 	buffer->offset = virt_addr->offset;
631 	buffer->size = virt_addr->size;
632 	buffer->scheduler = scheduler;
633 
634 	internal_buffer->virt_addr = virt_addr;
635 	internal_buffer->dma_buffer = buffer;
636 	internal_buffer->mm_flag = mm_flag;
637 	internal_buffer->phys_addr = phys_addr ? phys_addr + virt_addr->offset : 0;
638 
639 	return 0;
640 
641 free_dma_buffer:
642 	kfree(buffer);
643 free_sgt:
644 	rga_free_sgt(&sgt);
645 free_virt_addr:
646 	rga_free_virt_addr(&virt_addr);
647 put_current_mm:
648 	mmput(internal_buffer->current_mm);
649 	mmdrop(internal_buffer->current_mm);
650 	internal_buffer->current_mm = NULL;
651 
652 	return ret;
653 }
654 
655 static void rga_mm_unmap_phys_addr(struct rga_internal_buffer *internal_buffer)
656 {
657 	WARN_ON(internal_buffer->dma_buffer == NULL);
658 
659 	if (rga_mm_is_invalid_dma_buffer(internal_buffer->dma_buffer))
660 		return;
661 
662 	if (internal_buffer->dma_buffer->scheduler->data->mmu == RGA_IOMMU)
663 		rga_iommu_unmap(internal_buffer->dma_buffer);
664 
665 	kfree(internal_buffer->dma_buffer);
666 	internal_buffer->dma_buffer = NULL;
667 	internal_buffer->phys_addr = 0;
668 	internal_buffer->size = 0;
669 }
670 
671 static int rga_mm_map_phys_addr(struct rga_external_buffer *external_buffer,
672 				struct rga_internal_buffer *internal_buffer,
673 				struct rga_job *job)
674 {
675 	int ret;
676 	phys_addr_t phys_addr;
677 	int buffer_size;
678 	uint32_t mm_flag = 0;
679 	struct rga_dma_buffer *buffer;
680 	struct rga_scheduler_t *scheduler;
681 
682 	scheduler = job ? job->scheduler :
683 		    rga_drvdata->scheduler[rga_drvdata->map_scheduler_index];
684 	if (scheduler == NULL) {
685 		pr_err("Invalid scheduler device!\n");
686 		return -EINVAL;
687 	}
688 
689 	if (internal_buffer->memory_parm.size)
690 		buffer_size = internal_buffer->memory_parm.size;
691 	else
692 		buffer_size = rga_image_size_cal(internal_buffer->memory_parm.width,
693 						 internal_buffer->memory_parm.height,
694 						 internal_buffer->memory_parm.format,
695 						 NULL, NULL, NULL);
696 	if (buffer_size <= 0) {
697 		pr_err("Failed to get phys addr size!\n");
698 		rga_dump_memory_parm(&internal_buffer->memory_parm);
699 		return buffer_size == 0 ? -EINVAL : buffer_size;
700 	}
701 
702 	phys_addr = external_buffer->memory;
703 	mm_flag |= RGA_MEM_PHYSICAL_CONTIGUOUS;
704 	if (rga_mm_check_range_phys_addr(phys_addr, buffer_size))
705 		mm_flag |= RGA_MEM_UNDER_4G;
706 
707 	if (!rga_mm_check_memory_limit(scheduler, mm_flag)) {
708 		pr_err("scheduler core[%d] unsupported mm_flag[0x%x]!\n",
709 		       scheduler->core, mm_flag);
710 		return -EINVAL;
711 	}
712 
713 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
714 	if (buffer == NULL) {
715 		pr_err("%s alloc internal dma buffer error!\n", __func__);
716 		return -ENOMEM;
717 	}
718 
719 	if (scheduler->data->mmu == RGA_IOMMU) {
720 		ret = rga_iommu_map(phys_addr, buffer_size, buffer, scheduler->dev);
721 		if (ret < 0) {
722 			pr_err("%s core[%d] map phys_addr error!\n", __func__, scheduler->core);
723 			goto free_dma_buffer;
724 		}
725 	}
726 
727 	buffer->scheduler = scheduler;
728 
729 	internal_buffer->phys_addr = phys_addr;
730 	internal_buffer->size = buffer_size;
731 	internal_buffer->mm_flag = mm_flag;
732 	internal_buffer->dma_buffer = buffer;
733 
734 	return 0;
735 
736 free_dma_buffer:
737 	kfree(buffer);
738 
739 	return ret;
740 }
741 
742 static int rga_mm_unmap_buffer(struct rga_internal_buffer *internal_buffer)
743 {
744 	switch (internal_buffer->type) {
745 	case RGA_DMA_BUFFER:
746 	case RGA_DMA_BUFFER_PTR:
747 		rga_mm_unmap_dma_buffer(internal_buffer);
748 		break;
749 	case RGA_VIRTUAL_ADDRESS:
750 		rga_mm_unmap_virt_addr(internal_buffer);
751 		break;
752 	case RGA_PHYSICAL_ADDRESS:
753 		rga_mm_unmap_phys_addr(internal_buffer);
754 		break;
755 	default:
756 		pr_err("Illegal external buffer!\n");
757 		return -EFAULT;
758 	}
759 
760 	return 0;
761 }
762 
763 static int rga_mm_map_buffer(struct rga_external_buffer *external_buffer,
764 			     struct rga_internal_buffer *internal_buffer,
765 			     struct rga_job *job, int write_flag)
766 {
767 	int ret;
768 
769 	memcpy(&internal_buffer->memory_parm, &external_buffer->memory_parm,
770 	       sizeof(internal_buffer->memory_parm));
771 
772 	switch (external_buffer->type) {
773 	case RGA_DMA_BUFFER:
774 	case RGA_DMA_BUFFER_PTR:
775 		internal_buffer->type = external_buffer->type;
776 
777 		ret = rga_mm_map_dma_buffer(external_buffer, internal_buffer, job);
778 		if (ret < 0) {
779 			pr_err("%s map dma_buf error!\n", __func__);
780 			return ret;
781 		}
782 
783 		internal_buffer->size = internal_buffer->dma_buffer->size -
784 					internal_buffer->dma_buffer->offset;
785 		internal_buffer->mm_flag |= RGA_MEM_NEED_USE_IOMMU;
786 		break;
787 	case RGA_VIRTUAL_ADDRESS:
788 		internal_buffer->type = RGA_VIRTUAL_ADDRESS;
789 
790 		ret = rga_mm_map_virt_addr(external_buffer, internal_buffer, job, write_flag);
791 		if (ret < 0) {
792 			pr_err("%s map virtual address error!\n", __func__);
793 			return ret;
794 		}
795 
796 		internal_buffer->size = internal_buffer->virt_addr->size -
797 					internal_buffer->virt_addr->offset;
798 		internal_buffer->mm_flag |= RGA_MEM_NEED_USE_IOMMU;
799 		break;
800 	case RGA_PHYSICAL_ADDRESS:
801 		internal_buffer->type = RGA_PHYSICAL_ADDRESS;
802 
803 		ret = rga_mm_map_phys_addr(external_buffer, internal_buffer, job);
804 		if (ret < 0) {
805 			pr_err("%s map physical address error!\n", __func__);
806 			return ret;
807 		}
808 
809 		internal_buffer->mm_flag |= RGA_MEM_NEED_USE_IOMMU;
810 		break;
811 	default:
812 		pr_err("Illegal external buffer!\n");
813 		return -EFAULT;
814 	}
815 
816 	return 0;
817 }
818 
819 static void rga_mm_kref_release_buffer(struct kref *ref)
820 {
821 	struct rga_internal_buffer *internal_buffer;
822 
823 	internal_buffer = container_of(ref, struct rga_internal_buffer, refcount);
824 	rga_mm_unmap_buffer(internal_buffer);
825 
826 	idr_remove(&rga_drvdata->mm->memory_idr, internal_buffer->handle);
827 	kfree(internal_buffer);
828 	rga_drvdata->mm->buffer_count--;
829 }
830 
831 /*
832  * Called at driver close to release the memory's handle references.
833  */
834 static int rga_mm_handle_remove(int id, void *ptr, void *data)
835 {
836 	struct rga_internal_buffer *internal_buffer = ptr;
837 
838 	rga_mm_kref_release_buffer(&internal_buffer->refcount);
839 
840 	return 0;
841 }
842 
843 static struct rga_internal_buffer *
844 rga_mm_lookup_external(struct rga_mm *mm_session,
845 		       struct rga_external_buffer *external_buffer)
846 {
847 	int id;
848 	struct dma_buf *dma_buf = NULL;
849 	struct rga_internal_buffer *temp_buffer = NULL;
850 	struct rga_internal_buffer *output_buffer = NULL;
851 
852 	WARN_ON(!mutex_is_locked(&mm_session->lock));
853 
854 	switch (external_buffer->type) {
855 	case RGA_DMA_BUFFER:
856 		dma_buf = dma_buf_get((int)external_buffer->memory);
857 		if (IS_ERR(dma_buf))
858 			return (struct rga_internal_buffer *)dma_buf;
859 
860 		idr_for_each_entry(&mm_session->memory_idr, temp_buffer, id) {
861 			if (temp_buffer->dma_buffer == NULL)
862 				continue;
863 
864 			if (temp_buffer->dma_buffer[0].dma_buf == dma_buf) {
865 				output_buffer = temp_buffer;
866 				break;
867 			}
868 		}
869 
870 		dma_buf_put(dma_buf);
871 		break;
872 	case RGA_VIRTUAL_ADDRESS:
873 		idr_for_each_entry(&mm_session->memory_idr, temp_buffer, id) {
874 			if (temp_buffer->virt_addr == NULL)
875 				continue;
876 
877 			if (temp_buffer->virt_addr->addr == external_buffer->memory) {
878 				output_buffer = temp_buffer;
879 				break;
880 			}
881 		}
882 
883 		break;
884 	case RGA_PHYSICAL_ADDRESS:
885 		idr_for_each_entry(&mm_session->memory_idr, temp_buffer, id) {
886 			if (temp_buffer->phys_addr == external_buffer->memory) {
887 				output_buffer = temp_buffer;
888 				break;
889 			}
890 		}
891 
892 		break;
893 	case RGA_DMA_BUFFER_PTR:
894 		idr_for_each_entry(&mm_session->memory_idr, temp_buffer, id) {
895 			if (temp_buffer->dma_buffer == NULL)
896 				continue;
897 
898 			if ((unsigned long)temp_buffer->dma_buffer[0].dma_buf ==
899 			    external_buffer->memory) {
900 				output_buffer = temp_buffer;
901 				break;
902 			}
903 		}
904 
905 		break;
906 
907 	default:
908 		pr_err("Illegal external buffer!\n");
909 		return NULL;
910 	}
911 
912 	return output_buffer;
913 }
914 
915 struct rga_internal_buffer *rga_mm_lookup_handle(struct rga_mm *mm_session, uint32_t handle)
916 {
917 	struct rga_internal_buffer *output_buffer;
918 
919 	WARN_ON(!mutex_is_locked(&mm_session->lock));
920 
921 	output_buffer = idr_find(&mm_session->memory_idr, handle);
922 
923 	return output_buffer;
924 }
925 
926 int rga_mm_lookup_flag(struct rga_mm *mm_session, uint64_t handle)
927 {
928 	struct rga_internal_buffer *output_buffer;
929 
930 	output_buffer = rga_mm_lookup_handle(mm_session, handle);
931 	if (output_buffer == NULL) {
932 		pr_err("This handle[%ld] is illegal.\n", (unsigned long)handle);
933 		return -EINVAL;
934 	}
935 
936 	return output_buffer->mm_flag;
937 }
938 
939 dma_addr_t rga_mm_lookup_iova(struct rga_internal_buffer *buffer)
940 {
941 	if (rga_mm_is_invalid_dma_buffer(buffer->dma_buffer))
942 		return 0;
943 
944 	return buffer->dma_buffer->iova + buffer->dma_buffer->offset;
945 }
946 
947 struct sg_table *rga_mm_lookup_sgt(struct rga_internal_buffer *buffer)
948 {
949 	if (rga_mm_is_invalid_dma_buffer(buffer->dma_buffer))
950 		return NULL;
951 
952 	return buffer->dma_buffer->sgt;
953 }
954 
955 void rga_mm_dump_buffer(struct rga_internal_buffer *dump_buffer)
956 {
957 	pr_info("handle = %d refcount = %d mm_flag = 0x%x\n",
958 		dump_buffer->handle, kref_read(&dump_buffer->refcount),
959 		dump_buffer->mm_flag);
960 
961 	switch (dump_buffer->type) {
962 	case RGA_DMA_BUFFER:
963 	case RGA_DMA_BUFFER_PTR:
964 		if (rga_mm_is_invalid_dma_buffer(dump_buffer->dma_buffer))
965 			break;
966 
967 		pr_info("dma_buffer:\n");
968 		pr_info("dma_buf = %p, iova = 0x%lx, sgt = %p, size = %ld, map_core = 0x%x\n",
969 			dump_buffer->dma_buffer->dma_buf,
970 			(unsigned long)dump_buffer->dma_buffer->iova,
971 			dump_buffer->dma_buffer->sgt,
972 			dump_buffer->dma_buffer->size,
973 			dump_buffer->dma_buffer->scheduler->core);
974 
975 		if (dump_buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS)
976 			pr_info("is contiguous, pa = 0x%lx\n",
977 				(unsigned long)dump_buffer->phys_addr);
978 		break;
979 	case RGA_VIRTUAL_ADDRESS:
980 		if (dump_buffer->virt_addr == NULL)
981 			break;
982 
983 		pr_info("virtual address:\n");
984 		pr_info("va = 0x%lx, pages = %p, size = %ld\n",
985 			(unsigned long)dump_buffer->virt_addr->addr,
986 			dump_buffer->virt_addr->pages,
987 			dump_buffer->virt_addr->size);
988 
989 		if (rga_mm_is_invalid_dma_buffer(dump_buffer->dma_buffer))
990 			break;
991 
992 		pr_info("iova = 0x%lx, offset = 0x%lx, sgt = %p, size = %ld, map_core = 0x%x\n",
993 			(unsigned long)dump_buffer->dma_buffer->iova,
994 			(unsigned long)dump_buffer->dma_buffer->offset,
995 			dump_buffer->dma_buffer->sgt,
996 			dump_buffer->dma_buffer->size,
997 			dump_buffer->dma_buffer->scheduler->core);
998 
999 		if (dump_buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS)
1000 			pr_info("is contiguous, pa = 0x%lx\n",
1001 				(unsigned long)dump_buffer->phys_addr);
1002 		break;
1003 	case RGA_PHYSICAL_ADDRESS:
1004 		pr_info("physical address: pa = 0x%lx\n", (unsigned long)dump_buffer->phys_addr);
1005 		break;
1006 	default:
1007 		pr_err("Illegal external buffer!\n");
1008 		break;
1009 	}
1010 }
1011 
1012 void rga_mm_dump_info(struct rga_mm *mm_session)
1013 {
1014 	int id;
1015 	struct rga_internal_buffer *dump_buffer;
1016 
1017 	WARN_ON(!mutex_is_locked(&mm_session->lock));
1018 
1019 	pr_info("rga mm info:\n");
1020 
1021 	pr_info("buffer count = %d\n", mm_session->buffer_count);
1022 	pr_info("===============================================================\n");
1023 
1024 	idr_for_each_entry(&mm_session->memory_idr, dump_buffer, id) {
1025 		rga_mm_dump_buffer(dump_buffer);
1026 
1027 		pr_info("---------------------------------------------------------------\n");
1028 	}
1029 }
1030 
1031 static bool rga_mm_is_need_mmu(struct rga_job *job, struct rga_internal_buffer *buffer)
1032 {
1033 	if (buffer == NULL || job == NULL || job->scheduler == NULL)
1034 		return false;
1035 
1036 	/* With RGA_IOMMU, the driver does not need to enable the MMU explicitly. */
1037 	if (job->scheduler->data->mmu == RGA_IOMMU)
1038 		return false;
1039 
1040 	/* With RGA_MMU, the driver must decide whether to enable the MMU. */
1041 	if (buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS)
1042 		return false;
1043 	else if (buffer->mm_flag & RGA_MEM_NEED_USE_IOMMU)
1044 		return true;
1045 
1046 	return false;
1047 }
1048 
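/*
 * mmu_flag layout as programmed below: bit 8 = src, bit 9 = src1,
 * bit 10 = dst, bit 11 = els channel MMU enable; if any of them is set,
 * bit 0 and bit 31 plus mmu_en switch the MMU on globally for the job.
 */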
1049 static int rga_mm_set_mmu_flag(struct rga_job *job)
1050 {
1051 	struct rga_mmu_t *mmu_info;
1052 	int src_mmu_en;
1053 	int src1_mmu_en;
1054 	int dst_mmu_en;
1055 	int els_mmu_en;
1056 
1057 	src_mmu_en = rga_mm_is_need_mmu(job, job->src_buffer.addr);
1058 	src1_mmu_en = rga_mm_is_need_mmu(job, job->src1_buffer.addr);
1059 	dst_mmu_en = rga_mm_is_need_mmu(job, job->dst_buffer.addr);
1060 	els_mmu_en = rga_mm_is_need_mmu(job, job->els_buffer.addr);
1061 
1062 	mmu_info = &job->rga_command_base.mmu_info;
1063 	memset(mmu_info, 0x0, sizeof(*mmu_info));
1064 	if (src_mmu_en)
1065 		mmu_info->mmu_flag |= (0x1 << 8);
1066 	if (src1_mmu_en)
1067 		mmu_info->mmu_flag |= (0x1 << 9);
1068 	if (dst_mmu_en)
1069 		mmu_info->mmu_flag |= (0x1 << 10);
1070 	if (els_mmu_en)
1071 		mmu_info->mmu_flag |= (0x1 << 11);
1072 
1073 	if (mmu_info->mmu_flag & (0xf << 8)) {
1074 		mmu_info->mmu_flag |= 1;
1075 		mmu_info->mmu_flag |= 1 << 31;
1076 		mmu_info->mmu_en  = 1;
1077 	}
1078 
1079 	return 0;
1080 }
1081 
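/*
 * Flatten an sg_table into the RGA2 MMU page table: one 32-bit page
 * address per entry, taken from sg_dma_address() or sg_phys() depending on
 * use_dma_address. For example (assuming 4 KiB pages), a single contiguous
 * sgl of length 0x3000 at 0x10000000 produces the entries
 * { 0x10000000, 0x10001000, 0x10002000 }.
 */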
1082 static int rga_mm_sgt_to_page_table(struct sg_table *sg,
1083 				    uint32_t *page_table,
1084 				    int32_t pageCount,
1085 				    int32_t use_dma_address)
1086 {
1087 	uint32_t i;
1088 	unsigned long Address;
1089 	uint32_t mapped_size = 0;
1090 	uint32_t len;
1091 	struct scatterlist *sgl = sg->sgl;
1092 	uint32_t sg_num = 0;
1093 	uint32_t break_flag = 0;
1094 
1095 	do {
1096 		/*
1097 		 *   The length of each sgl is expected to be obtained here, not
1098 		 * the length of the entire dma_buf, so sg_dma_len() is not used.
1099 		 */
1100 		len = sgl->length >> PAGE_SHIFT;
1101 
1102 		if (use_dma_address)
1103 			/*
1104 			 *   The fd passed by user space is mapped to an sg
1105 			 * table through dma_buf_map_attachment(), so the
1106 			 * dma_address can be used here.
1107 			 *   When the mapped device has no IOMMU, this is the
1108 			 * first address of the real physical page, provided
1109 			 * it already satisfies the device's addressing
1110 			 * requirements; otherwise swiotlb is triggered and a
1111 			 * software-mapped (bounce) physical address that does
1112 			 * satisfy the device's addressing requirements is
1113 			 * returned instead.
1114 			 */
1115 			Address = sg_dma_address(sgl);
1116 		else
1117 			Address = sg_phys(sgl);
1118 
1119 		for (i = 0; i < len; i++) {
1120 			if (mapped_size + i >= pageCount) {
1121 				break_flag = 1;
1122 				break;
1123 			}
1124 			page_table[mapped_size + i] = (uint32_t)(Address + (i << PAGE_SHIFT));
1125 		}
1126 		if (break_flag)
1127 			break;
1128 		mapped_size += len;
1129 		sg_num += 1;
1130 	} while ((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->orig_nents));
1131 
1132 	return 0;
1133 }
1134 
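/*
 * Fill the MMU page table for one channel. With separate Y/UV/V buffers
 * the three per-plane tables are laid out back to back and the image
 * addresses become offsets into that table (uv starts at yrgb_count pages,
 * v at yrgb_count + uv_count pages); with a single buffer the whole image
 * uses one table and only the sub-page offset is kept.
 */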
1135 static int rga_mm_set_mmu_base(struct rga_job *job,
1136 			       struct rga_img_info_t *img,
1137 			       struct rga_job_buffer *job_buf)
1138 {
1139 	int ret;
1140 	int yrgb_count = 0;
1141 	int uv_count = 0;
1142 	int v_count = 0;
1143 	int page_count = 0;
1144 	int order = 0;
1145 	uint32_t *page_table = NULL;
1146 	struct sg_table *sgt = NULL;
1147 
1148 	int img_size, yrgb_size, uv_size, v_size;
1149 	int img_offset = 0;
1150 	int yrgb_offset = 0;
1151 	int uv_offset = 0;
1152 	int v_offset = 0;
1153 
1154 	img_size = rga_image_size_cal(img->vir_w, img->vir_h, img->format,
1155 				      &yrgb_size, &uv_size, &v_size);
1156 	if (img_size <= 0) {
1157 		pr_err("Image size cal error! width = %d, height = %d, format = %s\n",
1158 		       img->vir_w, img->vir_h, rga_get_format_name(img->format));
1159 		return -EINVAL;
1160 	}
1161 
1162 	/* Separate per-plane (Y/UV/V) buffers are in use. */
1163 	if (job_buf->uv_addr) {
1164 		if (job_buf->y_addr->virt_addr != NULL)
1165 			yrgb_offset = job_buf->y_addr->virt_addr->offset;
1166 		if (job_buf->uv_addr->virt_addr != NULL)
1167 			uv_offset = job_buf->uv_addr->virt_addr->offset;
1168 		if (job_buf->v_addr->virt_addr != NULL)
1169 			v_offset = job_buf->v_addr->virt_addr->offset;
1170 
1171 		yrgb_count = RGA_GET_PAGE_COUNT(yrgb_size + yrgb_offset);
1172 		uv_count = RGA_GET_PAGE_COUNT(uv_size + uv_offset);
1173 		v_count = RGA_GET_PAGE_COUNT(v_size + v_offset);
1174 		page_count = yrgb_count + uv_count + v_count;
1175 
1176 		if (page_count <= 0) {
1177 			pr_err("page count cal error! yrgb = %d, uv = %d, v = %d\n",
1178 			       yrgb_count, uv_count, v_count);
1179 			return -EFAULT;
1180 		}
1181 
1182 		if (job->flags & RGA_JOB_USE_HANDLE) {
1183 			order = get_order(page_count * sizeof(uint32_t *));
1184 			if (order >= MAX_ORDER) {
1185 				pr_err("Can not alloc pages with order[%d] for page_table, max_order = %d\n",
1186 				       order, MAX_ORDER);
1187 				return -ENOMEM;
1188 			}
1189 
1190 			page_table = (uint32_t *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
1191 			if (page_table == NULL) {
1192 				pr_err("%s can not alloc pages for page_table, order = %d\n",
1193 				       __func__, order);
1194 				return -ENOMEM;
1195 			}
1196 		} else {
1197 			mutex_lock(&rga_drvdata->lock);
1198 
1199 			page_table = rga_mmu_buf_get(rga_drvdata->mmu_base, page_count);
1200 			if (page_table == NULL) {
1201 				pr_err("mmu_buf get error!\n");
1202 				mutex_unlock(&rga_drvdata->lock);
1203 				return -EFAULT;
1204 			}
1205 
1206 			mutex_unlock(&rga_drvdata->lock);
1207 		}
1208 
1209 		sgt = rga_mm_lookup_sgt(job_buf->y_addr);
1210 		if (sgt == NULL) {
1211 			pr_err("rga2 cannot get sgt from internal buffer!\n");
1212 			ret = -EINVAL;
1213 			goto err_free_page_table;
1214 		}
1215 		rga_mm_sgt_to_page_table(sgt, page_table, yrgb_count, false);
1216 
1217 		sgt = rga_mm_lookup_sgt(job_buf->uv_addr);
1218 		if (sgt == NULL) {
1219 			pr_err("rga2 cannot get sgt from internal buffer!\n");
1220 			ret = -EINVAL;
1221 			goto err_free_page_table;
1222 		}
1223 		rga_mm_sgt_to_page_table(sgt, page_table + yrgb_count, uv_count, false);
1224 
1225 		sgt = rga_mm_lookup_sgt(job_buf->v_addr);
1226 		if (sgt == NULL) {
1227 			pr_err("rga2 cannot get sgt from internal buffer!\n");
1228 			ret = -EINVAL;
1229 			goto err_free_page_table;
1230 		}
1231 		rga_mm_sgt_to_page_table(sgt, page_table + yrgb_count + uv_count, v_count, false);
1232 
1233 		img->yrgb_addr = yrgb_offset;
1234 		img->uv_addr = (yrgb_count << PAGE_SHIFT) + uv_offset;
1235 		img->v_addr = ((yrgb_count + uv_count) << PAGE_SHIFT) + v_offset;
1236 	} else {
1237 		if (job_buf->addr->virt_addr != NULL)
1238 			img_offset = job_buf->addr->virt_addr->offset;
1239 
1240 		page_count = RGA_GET_PAGE_COUNT(img_size + img_offset);
1241 		if (page_count <= 0) {
1242 			pr_err("page count cal error! page_count = %d, img_size = %d, offset = %d\n",
1243 			       page_count, img_size, img_offset);
1244 			return -EFAULT;
1245 		}
1246 
1247 		if (job->flags & RGA_JOB_USE_HANDLE) {
1248 			order = get_order(page_count * sizeof(uint32_t *));
1249 			if (order >= MAX_ORDER) {
1250 				pr_err("Can not alloc pages with order[%d] for page_table, max_order = %d\n",
1251 				       order, MAX_ORDER);
1252 				return -ENOMEM;
1253 			}
1254 
1255 			page_table = (uint32_t *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
1256 			if (page_table == NULL) {
1257 				pr_err("%s can not alloc pages for page_table, order = %d\n",
1258 				       __func__, order);
1259 				return -ENOMEM;
1260 			}
1261 		} else {
1262 			mutex_lock(&rga_drvdata->lock);
1263 
1264 			page_table = rga_mmu_buf_get(rga_drvdata->mmu_base, page_count);
1265 			if (page_table == NULL) {
1266 				pr_err("mmu_buf get error!\n");
1267 				mutex_unlock(&rga_drvdata->lock);
1268 				return -EFAULT;
1269 			}
1270 
1271 			mutex_unlock(&rga_drvdata->lock);
1272 		}
1273 
1274 		sgt = rga_mm_lookup_sgt(job_buf->addr);
1275 		if (sgt == NULL) {
1276 			pr_err("rga2 cannot get sgt from internal buffer!\n");
1277 			ret = -EINVAL;
1278 			goto err_free_page_table;
1279 		}
1280 		rga_mm_sgt_to_page_table(sgt, page_table, page_count, false);
1281 
1282 		img->yrgb_addr = img_offset;
1283 		rga_convert_addr(img, false);
1284 	}
1285 
1286 	job_buf->page_table = page_table;
1287 	job_buf->order = order;
1288 	job_buf->page_count = page_count;
1289 
1290 	return 0;
1291 
1292 err_free_page_table:
1293 	if (job->flags & RGA_JOB_USE_HANDLE)
1294 		free_pages((unsigned long)page_table, order);
1295 	return ret;
1296 }
1297 
1298 static int rga_mm_sync_dma_sg_for_device(struct rga_internal_buffer *buffer,
1299 					 struct rga_job *job,
1300 					 enum dma_data_direction dir)
1301 {
1302 	struct sg_table *sgt;
1303 	struct rga_scheduler_t *scheduler;
1304 
1305 	sgt = rga_mm_lookup_sgt(buffer);
1306 	if (sgt == NULL) {
1307 		pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
1308 		       __func__, __LINE__, job->core);
1309 		return -EINVAL;
1310 	}
1311 
1312 	scheduler = buffer->dma_buffer->scheduler;
1313 	if (scheduler == NULL) {
1314 		pr_err("%s(%d), failed to get scheduler, core = 0x%x\n",
1315 		       __func__, __LINE__, job->core);
1316 		return -EFAULT;
1317 	}
1318 
1319 	dma_sync_sg_for_device(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
1320 
1321 	return 0;
1322 }
1323 
1324 static int rga_mm_sync_dma_sg_for_cpu(struct rga_internal_buffer *buffer,
1325 				      struct rga_job *job,
1326 				      enum dma_data_direction dir)
1327 {
1328 	struct sg_table *sgt;
1329 	struct rga_scheduler_t *scheduler;
1330 
1331 	sgt = rga_mm_lookup_sgt(buffer);
1332 	if (sgt == NULL) {
1333 		pr_err("%s(%d), failed to get sgt, core = 0x%x\n",
1334 		       __func__, __LINE__, job->core);
1335 		return -EINVAL;
1336 	}
1337 
1338 	scheduler = buffer->dma_buffer->scheduler;
1339 	if (scheduler == NULL) {
1340 		pr_err("%s(%d), failed to get scheduler, core = 0x%x\n",
1341 		       __func__, __LINE__, job->core);
1342 		return -EFAULT;
1343 	}
1344 
1345 	dma_sync_sg_for_cpu(scheduler->dev, sgt->sgl, sgt->orig_nents, dir);
1346 
1347 	return 0;
1348 }
1349 
1350 static int rga_mm_get_buffer_info(struct rga_job *job,
1351 				  struct rga_internal_buffer *internal_buffer,
1352 				  uint64_t *channel_addr)
1353 {
1354 	uint64_t addr;
1355 
1356 	switch (job->scheduler->data->mmu) {
1357 	case RGA_IOMMU:
1358 		addr = rga_mm_lookup_iova(internal_buffer);
1359 		if (addr == 0) {
1360 			pr_err("core[%d] lookup buffer_type[0x%x] iova error!\n",
1361 			       job->core, internal_buffer->type);
1362 			return -EINVAL;
1363 		}
1364 		break;
1365 	case RGA_MMU:
1366 	default:
1367 		if (internal_buffer->mm_flag & RGA_MEM_PHYSICAL_CONTIGUOUS) {
1368 			addr = internal_buffer->phys_addr;
1369 			break;
1370 		}
1371 
1372 		switch (internal_buffer->type) {
1373 		case RGA_DMA_BUFFER:
1374 		case RGA_DMA_BUFFER_PTR:
1375 			addr = 0;
1376 			break;
1377 		case RGA_VIRTUAL_ADDRESS:
1378 			addr = internal_buffer->virt_addr->addr;
1379 			break;
1380 		case RGA_PHYSICAL_ADDRESS:
1381 			addr = internal_buffer->phys_addr;
1382 			break;
1383 		default:
1384 			pr_err("Illegal external buffer!\n");
1385 			return -EFAULT;
1386 		}
1387 		break;
1388 	}
1389 
1390 	*channel_addr = addr;
1391 
1392 	return 0;
1393 }
1394 
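/*
 * Resolve a buffer handle for one job channel: look it up in the IDR, take
 * a kref, translate it to the address the hardware needs (IOVA, physical
 * or virtual address depending on the MMU type), check that it is large
 * enough, and sync the cache for the device when the buffer is flagged
 * RGA_MEM_FORCE_FLUSH_CACHE.
 */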
1395 static int rga_mm_get_buffer(struct rga_mm *mm,
1396 			     struct rga_job *job,
1397 			     uint64_t handle,
1398 			     uint64_t *channel_addr,
1399 			     struct rga_internal_buffer **buf,
1400 			     int require_size,
1401 			     enum dma_data_direction dir)
1402 {
1403 	int ret = 0;
1404 	struct rga_internal_buffer *internal_buffer = NULL;
1405 
1406 	if (handle == 0) {
1407 		pr_err("No buffer handle can be used!\n");
1408 		return -EFAULT;
1409 	}
1410 
1411 	mutex_lock(&mm->lock);
1412 	*buf = rga_mm_lookup_handle(mm, handle);
1413 	if (*buf == NULL) {
1414 		pr_err("This handle[%ld] is illegal.\n", (unsigned long)handle);
1415 
1416 		mutex_unlock(&mm->lock);
1417 		return -EFAULT;
1418 	}
1419 
1420 	internal_buffer = *buf;
1421 	kref_get(&internal_buffer->refcount);
1422 
1423 	if (DEBUGGER_EN(MM)) {
1424 		pr_info("handle[%d] get info:\n", (int)handle);
1425 		rga_mm_dump_buffer(internal_buffer);
1426 	}
1427 
1428 	mutex_unlock(&mm->lock);
1429 
1430 	ret = rga_mm_get_buffer_info(job, internal_buffer, channel_addr);
1431 	if (ret < 0) {
1432 		pr_err("handle[%ld] failed to get internal buffer info!\n", (unsigned long)handle);
1433 		return ret;
1434 	}
1435 
1436 	if (internal_buffer->size < require_size) {
1437 		ret = -EINVAL;
1438 		pr_err("Only get buffer %ld byte from handle[%ld], but current required %d byte\n",
1439 		pr_err("Only got %ld bytes from handle[%ld], but %d bytes are currently required\n",
1440 
1441 		goto put_internal_buffer;
1442 	}
1443 
1444 	if (internal_buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE) {
1445 		/*
1446 		 * Some userspace virtual addresses do not have an
1447 		 * interface for flushing the cache, so it is mandatory
1448 		 * to flush the cache when the virtual address is used.
1449 		 */
1450 		ret = rga_mm_sync_dma_sg_for_device(internal_buffer, job, dir);
1451 		if (ret < 0) {
1452 			pr_err("sync sgt for device error!\n");
1453 			goto put_internal_buffer;
1454 		}
1455 	}
1456 
1457 	return 0;
1458 
1459 put_internal_buffer:
1460 	mutex_lock(&mm->lock);
1461 	kref_put(&internal_buffer->refcount, rga_mm_kref_release_buffer);
1462 	mutex_unlock(&mm->lock);
1463 
1464 	return ret;
1465 
1466 }
1467 
1468 static void rga_mm_put_buffer(struct rga_mm *mm,
1469 			      struct rga_job *job,
1470 			      struct rga_internal_buffer *internal_buffer,
1471 			      enum dma_data_direction dir)
1472 {
1473 	if (internal_buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE && dir != DMA_NONE)
1474 		if (rga_mm_sync_dma_sg_for_cpu(internal_buffer, job, dir))
1475 			pr_err("sync sgt for cpu error!\n");
1476 
1477 	mutex_lock(&mm->lock);
1478 	kref_put(&internal_buffer->refcount, rga_mm_kref_release_buffer);
1479 	mutex_unlock(&mm->lock);
1480 }
1481 
1482 static void rga_mm_put_channel_handle_info(struct rga_mm *mm,
1483 					   struct rga_job *job,
1484 					   struct rga_job_buffer *job_buf,
1485 					   enum dma_data_direction dir)
1486 {
1487 	if (job_buf->y_addr)
1488 		rga_mm_put_buffer(mm, job, job_buf->y_addr, dir);
1489 	if (job_buf->uv_addr)
1490 		rga_mm_put_buffer(mm, job, job_buf->uv_addr, dir);
1491 	if (job_buf->v_addr)
1492 		rga_mm_put_buffer(mm, job, job_buf->v_addr, dir);
1493 
1494 	if (job_buf->page_table)
1495 		free_pages((unsigned long)job_buf->page_table, job_buf->order);
1496 }
1497 
1498 static int rga_mm_get_channel_handle_info(struct rga_mm *mm,
1499 					  struct rga_job *job,
1500 					  struct rga_img_info_t *img,
1501 					  struct rga_job_buffer *job_buf,
1502 					  enum dma_data_direction dir)
1503 {
1504 	int ret = 0;
1505 	int handle = 0;
1506 	int img_size, yrgb_size, uv_size, v_size;
1507 
1508 	img_size = rga_image_size_cal(img->vir_w, img->vir_h, img->format,
1509 				      &yrgb_size, &uv_size, &v_size);
1510 	if (img_size <= 0) {
1511 		pr_err("Image size cal error! width = %d, height = %d, format = %s\n",
1512 		       img->vir_w, img->vir_h, rga_get_format_name(img->format));
1513 		return -EINVAL;
1514 	}
1515 
1516 	/* Separate per-plane (Y/UV/V) buffer handles are in use. */
1517 	if (img->uv_addr > 0) {
1518 		handle = img->yrgb_addr;
1519 		if (handle > 0) {
1520 			ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr,
1521 						&job_buf->y_addr, yrgb_size, dir);
1522 			if (ret < 0) {
1523 				pr_err("handle[%d] Can't get y/rgb address info!\n", handle);
1524 				return ret;
1525 			}
1526 		}
1527 
1528 		handle = img->uv_addr;
1529 		if (handle > 0) {
1530 			ret = rga_mm_get_buffer(mm, job, handle, &img->uv_addr,
1531 						&job_buf->uv_addr, uv_size, dir);
1532 			if (ret < 0) {
1533 				pr_err("handle[%d] Can't get uv address info!\n", handle);
1534 				return ret;
1535 			}
1536 		}
1537 
1538 		handle = img->v_addr;
1539 		if (handle > 0) {
1540 			ret = rga_mm_get_buffer(mm, job, handle, &img->v_addr,
1541 						&job_buf->v_addr, v_size, dir);
1542 			if (ret < 0) {
1543 				pr_err("handle[%d] Can't get v address info!\n", handle);
1544 				return ret;
1545 			}
1546 		}
1547 	} else {
1548 		handle = img->yrgb_addr;
1549 		if (handle > 0) {
1550 			ret = rga_mm_get_buffer(mm, job, handle, &img->yrgb_addr,
1551 						&job_buf->addr, img_size, dir);
1552 			if (ret < 0) {
1553 				pr_err("handle[%d] Can't get y/rgb address info!\n", handle);
1554 				return ret;
1555 			}
1556 		}
1557 
1558 		rga_convert_addr(img, false);
1559 	}
1560 
1561 	if (job->scheduler->data->mmu == RGA_MMU &&
1562 	    rga_mm_is_need_mmu(job, job_buf->addr)) {
1563 		ret = rga_mm_set_mmu_base(job, img, job_buf);
1564 		if (ret < 0) {
1565 			pr_err("Can't set RGA2 MMU_BASE from handle!\n");
1566 
1567 			rga_mm_put_channel_handle_info(mm, job, job_buf, dir);
1568 			return ret;
1569 		}
1570 	}
1571 
1572 	return 0;
1573 }
1574 
1575 static int rga_mm_get_handle_info(struct rga_job *job)
1576 {
1577 	int ret = 0;
1578 	struct rga_req *req = NULL;
1579 	struct rga_mm *mm = NULL;
1580 	enum dma_data_direction dir;
1581 
1582 	req = &job->rga_command_base;
1583 	mm = rga_drvdata->mm;
1584 
1585 	if (likely(req->src.yrgb_addr > 0)) {
1586 		ret = rga_mm_get_channel_handle_info(mm, job, &req->src,
1587 						     &job->src_buffer,
1588 						     DMA_TO_DEVICE);
1589 		if (ret < 0) {
1590 			pr_err("Can't get src buffer info from handle!\n");
1591 			return ret;
1592 		}
1593 	}
1594 
1595 	if (likely(req->dst.yrgb_addr > 0)) {
1596 		ret = rga_mm_get_channel_handle_info(mm, job, &req->dst,
1597 						     &job->dst_buffer,
1598 						     DMA_TO_DEVICE);
1599 		if (ret < 0) {
1600 			pr_err("Can't get dst buffer info from handle!\n");
1601 			return ret;
1602 		}
1603 	}
1604 
1605 	if (likely(req->pat.yrgb_addr > 0)) {
1606 
1607 		if (req->render_mode != UPDATE_PALETTE_TABLE_MODE) {
1608 			if (req->bsfilter_flag)
1609 				dir = DMA_BIDIRECTIONAL;
1610 			else
1611 				dir = DMA_TO_DEVICE;
1612 
1613 			ret = rga_mm_get_channel_handle_info(mm, job, &req->pat,
1614 							     &job->src1_buffer,
1615 							     dir);
1616 		} else {
1617 			ret = rga_mm_get_channel_handle_info(mm, job, &req->pat,
1618 							     &job->els_buffer,
1619 							     DMA_BIDIRECTIONAL);
1620 		}
1621 		if (ret < 0) {
1622 			pr_err("Can't get pat buffer info from handle!\n");
1623 			return ret;
1624 		}
1625 	}
1626 
1627 	rga_mm_set_mmu_flag(job);
1628 
1629 	return 0;
1630 }
1631 
1632 static void rga_mm_put_handle_info(struct rga_job *job)
1633 {
1634 	struct rga_mm *mm = rga_drvdata->mm;
1635 
1636 	rga_mm_put_channel_handle_info(mm, job, &job->src_buffer, DMA_NONE);
1637 	rga_mm_put_channel_handle_info(mm, job, &job->dst_buffer, DMA_FROM_DEVICE);
1638 	rga_mm_put_channel_handle_info(mm, job, &job->src1_buffer, DMA_NONE);
1639 	rga_mm_put_channel_handle_info(mm, job, &job->els_buffer, DMA_NONE);
1640 }
1641 
1642 static void rga_mm_put_channel_external_buffer(struct rga_job_buffer *job_buffer)
1643 {
1644 	if (job_buffer->ex_addr->type == RGA_DMA_BUFFER_PTR)
1645 		dma_buf_put((struct dma_buf *)(unsigned long)job_buffer->ex_addr->memory);
1646 
1647 	kfree(job_buffer->ex_addr);
1648 	job_buffer->ex_addr = NULL;
1649 }
1650 
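/*
 * Legacy (non-handle) path: infer the buffer type from the image info.
 * A non-zero yrgb_addr is treated as a dma-buf fd; otherwise uv_addr is
 * taken as a userspace virtual address when the channel's mmu_flag bit is
 * set, or as a physical address when it is not.
 */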
1651 static int rga_mm_get_channel_external_buffer(int mmu_flag,
1652 					      struct rga_img_info_t *img_info,
1653 					      struct rga_job_buffer *job_buffer)
1654 {
1655 	struct dma_buf *dma_buf = NULL;
1656 	struct rga_external_buffer *external_buffer = NULL;
1657 
1658 	/* Multi-planar formats are not supported here; each channel uses a single buffer. */
1659 	external_buffer = kzalloc(sizeof(*external_buffer), GFP_KERNEL);
1660 	if (external_buffer == NULL) {
1661 		pr_err("Cannot alloc job_buffer!\n");
1662 		return -ENOMEM;
1663 	}
1664 
1665 	if (img_info->yrgb_addr) {
1666 		dma_buf = dma_buf_get(img_info->yrgb_addr);
1667 		if (IS_ERR(dma_buf)) {
1668 			pr_err("%s dma_buf_get fail fd[%lu]\n",
1669 			       __func__, (unsigned long)img_info->yrgb_addr);
1670 			kfree(external_buffer);
1671 			return -EINVAL;
1672 		}
1673 
1674 		external_buffer->memory = (unsigned long)dma_buf;
1675 		external_buffer->type = RGA_DMA_BUFFER_PTR;
1676 	} else if (mmu_flag && img_info->uv_addr) {
1677 		external_buffer->memory = (uint64_t)img_info->uv_addr;
1678 		external_buffer->type = RGA_VIRTUAL_ADDRESS;
1679 	} else if (img_info->uv_addr) {
1680 		external_buffer->memory = (uint64_t)img_info->uv_addr;
1681 		external_buffer->type = RGA_PHYSICAL_ADDRESS;
1682 	} else {
1683 		kfree(external_buffer);
1684 		return -EINVAL;
1685 	}
1686 
1687 	external_buffer->memory_parm.width = img_info->vir_w;
1688 	external_buffer->memory_parm.height = img_info->vir_h;
1689 	external_buffer->memory_parm.format = img_info->format;
1690 
1691 	job_buffer->ex_addr = external_buffer;
1692 
1693 	return 0;
1694 }
1695 
1696 static void rga_mm_put_external_buffer(struct rga_job *job)
1697 {
1698 	if (job->src_buffer.ex_addr)
1699 		rga_mm_put_channel_external_buffer(&job->src_buffer);
1700 	if (job->src1_buffer.ex_addr)
1701 		rga_mm_put_channel_external_buffer(&job->src1_buffer);
1702 	if (job->dst_buffer.ex_addr)
1703 		rga_mm_put_channel_external_buffer(&job->dst_buffer);
1704 	if (job->els_buffer.ex_addr)
1705 		rga_mm_put_channel_external_buffer(&job->els_buffer);
1706 }
1707 
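/*
 * Collect the external buffers for every channel of a non-handle job. The
 * per-channel MMU bits in mmu_info.mmu_flag (8 = src0, 9 = src1, 10 = dst,
 * 11 = els) select between virtual- and physical-address interpretation of
 * the legacy addresses.
 */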
1708 static int rga_mm_get_external_buffer(struct rga_job *job)
1709 {
1710 	int ret = -EINVAL;
1711 	int mmu_flag;
1712 
1713 	struct rga_img_info_t *src0 = NULL;
1714 	struct rga_img_info_t *src1 = NULL;
1715 	struct rga_img_info_t *dst = NULL;
1716 	struct rga_img_info_t *els = NULL;
1717 
1718 	if (job->rga_command_base.render_mode != COLOR_FILL_MODE)
1719 		src0 = &job->rga_command_base.src;
1720 
1721 	if (job->rga_command_base.render_mode != UPDATE_PALETTE_TABLE_MODE)
1722 		src1 = job->rga_command_base.bsfilter_flag ?
1723 		       &job->rga_command_base.pat : NULL;
1724 	else
1725 		els = &job->rga_command_base.pat;
1726 
1727 	dst = &job->rga_command_base.dst;
1728 
1729 	if (likely(src0)) {
1730 		mmu_flag = ((job->rga_command_base.mmu_info.mmu_flag >> 8) & 1);
1731 		ret = rga_mm_get_channel_external_buffer(mmu_flag, src0, &job->src_buffer);
1732 		if (ret < 0) {
1733 			pr_err("Cannot get src0 channel buffer!\n");
1734 			return ret;
1735 		}
1736 	}
1737 
1738 	if (likely(dst)) {
1739 		mmu_flag = ((job->rga_command_base.mmu_info.mmu_flag >> 10) & 1);
1740 		ret = rga_mm_get_channel_external_buffer(mmu_flag, dst, &job->dst_buffer);
1741 		if (ret < 0) {
1742 			pr_err("Cannot get dst channel buffer!\n");
1743 			goto error_put_buffer;
1744 		}
1745 	}
1746 
1747 	if (src1) {
1748 		mmu_flag = ((job->rga_command_base.mmu_info.mmu_flag >> 9) & 1);
1749 		ret = rga_mm_get_channel_external_buffer(mmu_flag, src1, &job->src1_buffer);
1750 		if (ret < 0) {
1751 			pr_err("Cannot get src1 channel buffer!\n");
1752 			goto error_put_buffer;
1753 		}
1754 	}
1755 
1756 	if (els) {
1757 		mmu_flag = ((job->rga_command_base.mmu_info.mmu_flag >> 11) & 1);
1758 		ret = rga_mm_get_channel_external_buffer(mmu_flag, els, &job->els_buffer);
1759 		if (ret < 0) {
1760 			pr_err("Cannot get els channel buffer!\n");
1761 			goto error_put_buffer;
1762 		}
1763 	}
1764 
1765 	return 0;
1766 error_put_buffer:
1767 	rga_mm_put_external_buffer(job);
1768 	return ret;
1769 }
1770 
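/*
 * Release one mapped channel: when RGA_MEM_FORCE_FLUSH_CACHE is set and a
 * DMA direction was given, the buffer is synced back for the CPU before
 * the internal buffer is unmapped and freed.
 */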
1771 static void rga_mm_unmap_channel_job_buffer(struct rga_job *job,
1772 					    struct rga_job_buffer *job_buffer,
1773 					    enum dma_data_direction dir)
1774 {
1775 	if (job_buffer->addr->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE && dir != DMA_NONE)
1776 		if (rga_mm_sync_dma_sg_for_cpu(job_buffer->addr, job, dir))
1777 			pr_err("sync sgt for cpu error!\n");
1778 
1779 	rga_mm_unmap_buffer(job_buffer->addr);
1780 	kfree(job_buffer->addr);
1781 
1782 	job_buffer->page_table = NULL;
1783 }
1784 
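/*
 * Map one channel for a non-handle job: allocate a rga_internal_buffer,
 * map the external address, write the resulting buffer address into
 * img->yrgb_addr, sync the cache towards the device when
 * RGA_MEM_FORCE_FLUSH_CACHE is set, and build the MMU page table when the
 * scheduler uses the RGA2 MMU.
 */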
1785 static int rga_mm_map_channel_job_buffer(struct rga_job *job,
1786 					 struct rga_img_info_t *img,
1787 					 struct rga_job_buffer *job_buffer,
1788 					 enum dma_data_direction dir,
1789 					 int write_flag)
1790 {
1791 	int ret;
1792 	struct rga_internal_buffer *buffer = NULL;
1793 
1794 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
1795 	if (buffer == NULL) {
1796 		pr_err("%s alloc internal_buffer error!\n", __func__);
1797 		return -ENOMEM;
1798 	}
1799 
1800 	ret = rga_mm_map_buffer(job_buffer->ex_addr, buffer, job, write_flag);
1801 	if (ret < 0) {
1802 		pr_err("job buffer map failed!\n");
1803 		goto error_free_buffer;
1804 	}
1805 
1806 	ret = rga_mm_get_buffer_info(job, buffer, &img->yrgb_addr);
1807 	if (ret < 0) {
1808 		pr_err("Failed to get internal buffer info!\n");
1809 		goto error_unmap_buffer;
1810 	}
1811 
1812 	if (buffer->mm_flag & RGA_MEM_FORCE_FLUSH_CACHE) {
1813 		ret = rga_mm_sync_dma_sg_for_device(buffer, job, dir);
1814 		if (ret < 0) {
1815 			pr_err("sync sgt for device error!\n");
1816 			goto error_unmap_buffer;
1817 		}
1818 	}
1819 
1820 	rga_convert_addr(img, false);
1821 
1822 	job_buffer->addr = buffer;
1823 
1824 	if (job->scheduler->data->mmu == RGA_MMU &&
1825 	    rga_mm_is_need_mmu(job, job_buffer->addr)) {
1826 		ret = rga_mm_set_mmu_base(job, img, job_buffer);
1827 		if (ret < 0) {
1828 			pr_err("Can't set RGA2 MMU_BASE!\n");
1829 			job_buffer->addr = NULL;
1830 			goto error_unmap_buffer;
1831 		}
1832 	}
1833 
1834 	return 0;
1835 
1836 error_unmap_buffer:
1837 	rga_mm_unmap_buffer(buffer);
1838 error_free_buffer:
1839 	kfree(buffer);
1840 
1841 	return ret;
1842 }
1843 
1844 static void rga_mm_unmap_buffer_info(struct rga_job *job)
1845 {
1846 	if (job->src_buffer.addr)
1847 		rga_mm_unmap_channel_job_buffer(job, &job->src_buffer, DMA_NONE);
1848 	if (job->dst_buffer.addr)
1849 		rga_mm_unmap_channel_job_buffer(job, &job->dst_buffer, DMA_FROM_DEVICE);
1850 	if (job->src1_buffer.addr)
1851 		rga_mm_unmap_channel_job_buffer(job, &job->src1_buffer, DMA_NONE);
1852 	if (job->els_buffer.addr)
1853 		rga_mm_unmap_channel_job_buffer(job, &job->els_buffer, DMA_NONE);
1854 
1855 	rga_mm_put_external_buffer(job);
1856 }
1857 
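/*
 * Map every channel of a non-handle job. src0 and dst are mapped
 * DMA_TO_DEVICE (dst with write_flag set), src1 is mapped
 * DMA_BIDIRECTIONAL only when bsfilter_flag is set, and els is always
 * mapped DMA_BIDIRECTIONAL.
 */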
1858 static int rga_mm_map_buffer_info(struct rga_job *job)
1859 {
1860 	int ret = 0;
1861 	struct rga_req *req = NULL;
1862 	enum dma_data_direction dir;
1863 
1864 	ret = rga_mm_get_external_buffer(job);
1865 	if (ret < 0) {
1866 		pr_err("failed to get external buffer from job_cmd!\n");
1867 		return ret;
1868 	}
1869 
1870 	req = &job->rga_command_base;
1871 
1872 	if (likely(job->src_buffer.ex_addr)) {
1873 		ret = rga_mm_map_channel_job_buffer(job, &req->src,
1874 						    &job->src_buffer,
1875 						    DMA_TO_DEVICE, false);
1876 		if (ret < 0) {
1877 			pr_err("src channel map job buffer failed!");
1878 			goto error_unmap_buffer;
1879 		}
1880 	}
1881 
1882 	if (likely(job->dst_buffer.ex_addr)) {
1883 		ret = rga_mm_map_channel_job_buffer(job, &req->dst,
1884 						    &job->dst_buffer,
1885 						    DMA_TO_DEVICE, true);
1886 		if (ret < 0) {
1887 			pr_err("dst channel map job buffer failed!");
1888 			goto error_unmap_buffer;
1889 		}
1890 	}
1891 
1892 	if (job->src1_buffer.ex_addr) {
1893 		if (req->bsfilter_flag)
1894 			dir = DMA_BIDIRECTIONAL;
1895 		else
1896 			dir = DMA_TO_DEVICE;
1897 
1898 		ret = rga_mm_map_channel_job_buffer(job, &req->pat,
1899 						    &job->src1_buffer,
1900 						    dir, false);
1901 		if (ret < 0) {
1902 			pr_err("src1 channel map job buffer failed!");
1903 			goto error_unmap_buffer;
1904 		}
1905 	}
1906 
1907 	if (job->els_buffer.ex_addr) {
1908 		ret = rga_mm_map_channel_job_buffer(job, &req->pat,
1909 						    &job->els_buffer,
1910 						    DMA_BIDIRECTIONAL, false);
1911 		if (ret < 0) {
1912 			pr_err("els channel map job buffer failed!");
1913 			goto error_unmap_buffer;
1914 		}
1915 	}
1916 
1917 	rga_mm_set_mmu_flag(job);
1918 	return 0;
1919 
1920 error_unmap_buffer:
1921 	rga_mm_unmap_buffer_info(job);
1922 
1923 	return ret;
1924 }
1925 
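/*
 * Entry point for job mapping: handle-based jobs resolve their buffers
 * through rga_mm_get_handle_info(), legacy jobs map the raw addresses in
 * the command through rga_mm_map_buffer_info().
 */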
1926 int rga_mm_map_job_info(struct rga_job *job)
1927 {
1928 	int ret;
1929 
1930 	if (job->flags & RGA_JOB_USE_HANDLE) {
1931 		ret = rga_mm_get_handle_info(job);
1932 		if (ret < 0) {
1933 			pr_err("failed to get buffer from handle\n");
1934 			return ret;
1935 		}
1936 	} else {
1937 		ret = rga_mm_map_buffer_info(job);
1938 		if (ret < 0) {
1939 			pr_err("failed to map buffer\n");
1940 			return ret;
1941 		}
1942 	}
1943 
1944 	return 0;
1945 }
1946 
1947 void rga_mm_unmap_job_info(struct rga_job *job)
1948 {
1949 	if (job->flags & RGA_JOB_USE_HANDLE)
1950 		rga_mm_put_handle_info(job);
1951 	else
1952 		rga_mm_unmap_buffer_info(job);
1953 }
1954 
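/*
 * Import an external buffer and return a user-visible handle, or 0 on
 * failure. A buffer that is already cached in rga_mm only gets its
 * reference count raised; otherwise it is mapped and registered in
 * memory_idr under mm->lock.
 */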
1955 uint32_t rga_mm_import_buffer(struct rga_external_buffer *external_buffer,
1956 			      struct rga_session *session)
1957 {
1958 	int ret = 0, new_id;
1959 	struct rga_mm *mm;
1960 	struct rga_internal_buffer *internal_buffer;
1961 
1962 	mm = rga_drvdata->mm;
1963 	if (mm == NULL) {
1964 		pr_err("rga mm is null!\n");
1965 		return 0;
1966 	}
1967 
1968 	mutex_lock(&mm->lock);
1969 
1970 	/* First, check whether this buffer has already been imported into rga_mm. */
1971 	internal_buffer = rga_mm_lookup_external(mm, external_buffer);
1972 	if (!IS_ERR_OR_NULL(internal_buffer)) {
1973 		kref_get(&internal_buffer->refcount);
1974 
1975 		mutex_unlock(&mm->lock);
1976 		return internal_buffer->handle;
1977 	}
1978 
1979 	/* Otherwise, map the external_buffer and cache it in rga_mm. */
1980 	internal_buffer = kzalloc(sizeof(struct rga_internal_buffer), GFP_KERNEL);
1981 	if (internal_buffer == NULL) {
1982 		pr_err("%s alloc internal_buffer error!\n", __func__);
1983 
1984 		mutex_unlock(&mm->lock);
1985 		return 0;
1986 	}
1987 
1988 	ret = rga_mm_map_buffer(external_buffer, internal_buffer, NULL, true);
1989 	if (ret < 0)
1990 		goto FREE_INTERNAL_BUFFER;
1991 
1992 	kref_init(&internal_buffer->refcount);
1993 	internal_buffer->session = session;
1994 
1995 	/*
1996 	 * Get the user-visible handle using idr. Preload and perform the
1997 	 * allocation under mm->lock.
1998 	 */
1999 	idr_preload(GFP_KERNEL);
2000 	new_id = idr_alloc_cyclic(&mm->memory_idr, internal_buffer, 1, 0, GFP_NOWAIT);
2001 	idr_preload_end();
2002 	if (new_id < 0) {
2003 		pr_err("internal_buffer alloc id failed!\n");
2004 		goto FREE_INTERNAL_BUFFER;
2005 	}
2006 
2007 	internal_buffer->handle = new_id;
2008 	mm->buffer_count++;
2009 
2010 	if (DEBUGGER_EN(MM)) {
2011 		pr_info("import buffer:\n");
2012 		rga_mm_dump_buffer(internal_buffer);
2013 	}
2014 
2015 	mutex_unlock(&mm->lock);
2016 	return internal_buffer->handle;
2017 
2018 FREE_INTERNAL_BUFFER:
2019 	mutex_unlock(&mm->lock);
2020 	kfree(internal_buffer);
2021 
2022 	return 0;
2023 }
2024 
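/*
 * Drop one reference on an imported buffer by handle; the final reference
 * is released through rga_mm_kref_release_buffer().
 */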
2025 int rga_mm_release_buffer(uint32_t handle)
2026 {
2027 	struct rga_mm *mm;
2028 	struct rga_internal_buffer *internal_buffer;
2029 
2030 	mm = rga_drvdata->mm;
2031 	if (mm == NULL) {
2032 		pr_err("rga mm is null!\n");
2033 		return -EFAULT;
2034 	}
2035 
2036 	mutex_lock(&mm->lock);
2037 
2038 	/* Find the buffer that has been imported */
2039 	internal_buffer = rga_mm_lookup_handle(mm, handle);
2040 	if (IS_ERR_OR_NULL(internal_buffer)) {
2041 		pr_err("This is not a buffer that has been imported, handle = %d\n", (int)handle);
2042 
2043 		mutex_unlock(&mm->lock);
2044 		return -ENOENT;
2045 	}
2046 
2047 	if (DEBUGGER_EN(MM)) {
2048 		pr_info("release buffer:\n");
2049 		rga_mm_dump_buffer(internal_buffer);
2050 	}
2051 
2052 	kref_put(&internal_buffer->refcount, rga_mm_kref_release_buffer);
2053 
2054 	mutex_unlock(&mm->lock);
2055 	return 0;
2056 }
2057 
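/*
 * Called when a session exits: drop the references still held on every
 * handle imported by that session so the underlying buffers are not
 * leaked.
 */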
2058 int rga_mm_session_release_buffer(struct rga_session *session)
2059 {
2060 	int i;
2061 	struct rga_mm *mm;
2062 	struct rga_internal_buffer *buffer;
2063 
2064 	mm = rga_drvdata->mm;
2065 	if (mm == NULL) {
2066 		pr_err("rga mm is null!\n");
2067 		return -EFAULT;
2068 	}
2069 
2070 	mutex_lock(&mm->lock);
2071 
2072 	idr_for_each_entry(&mm->memory_idr, buffer, i) {
2073 		if (session == buffer->session) {
2074 			pr_err("[tgid:%d] Decrement the reference of handle[%d] when the user exits\n",
2075 			       session->tgid, buffer->handle);
2076 			kref_put(&buffer->refcount, rga_mm_kref_release_buffer);
2077 		}
2078 	}
2079 
2080 	mutex_unlock(&mm->lock);
2081 	return 0;
2082 }
2083 
2084 int rga_mm_init(struct rga_mm **mm_session)
2085 {
2086 	struct rga_mm *mm = NULL;
2087 
2088 	*mm_session = kzalloc(sizeof(struct rga_mm), GFP_KERNEL);
2089 	if (*mm_session == NULL) {
2090 		pr_err("can not kzalloc for rga buffer mm_session\n");
2091 		return -ENOMEM;
2092 	}
2093 
2094 	mm = *mm_session;
2095 
2096 	mutex_init(&mm->lock);
2097 	idr_init_base(&mm->memory_idr, 1);
2098 
2099 	return 0;
2100 }
2101 
2102 int rga_mm_remove(struct rga_mm **mm_session)
2103 {
2104 	struct rga_mm *mm = *mm_session;
2105 
2106 	mutex_lock(&mm->lock);
2107 
2108 	idr_for_each(&mm->memory_idr, &rga_mm_handle_remove, mm);
2109 	idr_destroy(&mm->memory_idr);
2110 
2111 	mutex_unlock(&mm->lock);
2112 
2113 	kfree(*mm_session);
2114 	*mm_session = NULL;
2115 
2116 	return 0;
2117 }
2118