Lines matching refs: rknpu_obj
32 static int rknpu_gem_get_pages(struct rknpu_gem_object *rknpu_obj) in rknpu_gem_get_pages() argument
34 struct drm_device *drm = rknpu_obj->base.dev; in rknpu_gem_get_pages()
40 rknpu_obj->pages = drm_gem_get_pages(&rknpu_obj->base); in rknpu_gem_get_pages()
41 if (IS_ERR(rknpu_obj->pages)) { in rknpu_gem_get_pages()
42 ret = PTR_ERR(rknpu_obj->pages); in rknpu_gem_get_pages()
47 rknpu_obj->num_pages = rknpu_obj->size >> PAGE_SHIFT; in rknpu_gem_get_pages()
50 rknpu_obj->sgt = drm_prime_pages_to_sg(drm, rknpu_obj->pages, in rknpu_gem_get_pages()
51 rknpu_obj->num_pages); in rknpu_gem_get_pages()
53 rknpu_obj->sgt = in rknpu_gem_get_pages()
54 drm_prime_pages_to_sg(rknpu_obj->pages, rknpu_obj->num_pages); in rknpu_gem_get_pages()
56 if (IS_ERR(rknpu_obj->sgt)) { in rknpu_gem_get_pages()
57 ret = PTR_ERR(rknpu_obj->sgt); in rknpu_gem_get_pages()
62 ret = dma_map_sg(drm->dev, rknpu_obj->sgt->sgl, rknpu_obj->sgt->nents, in rknpu_gem_get_pages()
67 rknpu_obj->size); in rknpu_gem_get_pages()
72 if (rknpu_obj->flags & RKNPU_MEM_KERNEL_MAPPING) { in rknpu_gem_get_pages()
73 rknpu_obj->cookie = vmap(rknpu_obj->pages, rknpu_obj->num_pages, in rknpu_gem_get_pages()
75 if (!rknpu_obj->cookie) { in rknpu_gem_get_pages()
80 rknpu_obj->kv_addr = rknpu_obj->cookie; in rknpu_gem_get_pages()
83 dma_addr = sg_dma_address(rknpu_obj->sgt->sgl); in rknpu_gem_get_pages()
84 rknpu_obj->dma_addr = dma_addr; in rknpu_gem_get_pages()
86 for_each_sg(rknpu_obj->sgt->sgl, s, rknpu_obj->sgt->nents, i) { in rknpu_gem_get_pages()
97 dma_unmap_sg(drm->dev, rknpu_obj->sgt->sgl, rknpu_obj->sgt->nents, in rknpu_gem_get_pages()
101 sg_free_table(rknpu_obj->sgt); in rknpu_gem_get_pages()
102 kfree(rknpu_obj->sgt); in rknpu_gem_get_pages()
105 drm_gem_put_pages(&rknpu_obj->base, rknpu_obj->pages, false, false); in rknpu_gem_get_pages()
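
The fragments above trace rknpu_gem_get_pages(): drm_gem_get_pages() pins the object's shmem pages, drm_prime_pages_to_sg() builds a scatter-gather table (the paired call sites at lines 50 and 53 reflect the struct drm_device * argument that helper gained in v5.10), dma_map_sg() makes the pages device-visible, and vmap() adds an optional kernel mapping when RKNPU_MEM_KERNEL_MAPPING is set. A minimal sketch of that sequence, with demo_obj and all demo_* names invented here as stand-ins for struct rknpu_gem_object and its helpers:

```c
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

/* Stand-in for struct rknpu_gem_object, reduced to this path's fields. */
struct demo_obj {
	struct drm_gem_object base;
	struct page **pages;
	unsigned long num_pages;
	struct sg_table *sgt;
	void *kv_addr;
	bool want_kmap;
};

static int demo_get_pages(struct demo_obj *o)
{
	struct drm_device *drm = o->base.dev;
	int ret;

	/* Pin the shmem pages backing the GEM object. */
	o->pages = drm_gem_get_pages(&o->base);
	if (IS_ERR(o->pages))
		return PTR_ERR(o->pages);
	o->num_pages = o->base.size >> PAGE_SHIFT;

	/* Build a scatter-gather table for the page array; the helper
	 * gained a struct drm_device * argument in v5.10, hence the
	 * guard mirroring the two call sites above. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
	o->sgt = drm_prime_pages_to_sg(drm, o->pages, o->num_pages);
#else
	o->sgt = drm_prime_pages_to_sg(o->pages, o->num_pages);
#endif
	if (IS_ERR(o->sgt)) {
		ret = PTR_ERR(o->sgt);
		goto put_pages;
	}

	/* Make the pages device-visible; dma_map_sg() returns the
	 * number of mapped entries, 0 on failure. */
	ret = dma_map_sg(drm->dev, o->sgt->sgl, o->sgt->nents,
			 DMA_BIDIRECTIONAL);
	if (ret == 0) {
		ret = -ENOMEM;
		goto free_sgt;
	}

	/* Optional kernel mapping, as for RKNPU_MEM_KERNEL_MAPPING. */
	if (o->want_kmap) {
		o->kv_addr = vmap(o->pages, o->num_pages, VM_MAP,
				  PAGE_KERNEL);
		if (!o->kv_addr) {
			ret = -ENOMEM;
			goto unmap;
		}
	}
	return 0;

unmap:
	dma_unmap_sg(drm->dev, o->sgt->sgl, o->sgt->nents,
		     DMA_BIDIRECTIONAL);
free_sgt:
	sg_free_table(o->sgt);
	kfree(o->sgt);
put_pages:
	drm_gem_put_pages(&o->base, o->pages, false, false);
	return ret;
}
```

rknpu_gem_put_pages() (next cluster) is the exact mirror of this error path: vunmap(), dma_unmap_sg(), sg_free_table()/kfree(), then drm_gem_put_pages() with the dirty/accessed flags set.
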
110 static void rknpu_gem_put_pages(struct rknpu_gem_object *rknpu_obj) in rknpu_gem_put_pages() argument
112 struct drm_device *drm = rknpu_obj->base.dev; in rknpu_gem_put_pages()
114 if (rknpu_obj->flags & RKNPU_MEM_KERNEL_MAPPING) { in rknpu_gem_put_pages()
115 vunmap(rknpu_obj->kv_addr); in rknpu_gem_put_pages()
116 rknpu_obj->kv_addr = NULL; in rknpu_gem_put_pages()
119 if (rknpu_obj->sgt != NULL) { in rknpu_gem_put_pages()
120 dma_unmap_sg(drm->dev, rknpu_obj->sgt->sgl, in rknpu_gem_put_pages()
121 rknpu_obj->sgt->nents, DMA_BIDIRECTIONAL); in rknpu_gem_put_pages()
122 sg_free_table(rknpu_obj->sgt); in rknpu_gem_put_pages()
123 kfree(rknpu_obj->sgt); in rknpu_gem_put_pages()
126 drm_gem_put_pages(&rknpu_obj->base, rknpu_obj->pages, true, true); in rknpu_gem_put_pages()
130 static int rknpu_gem_alloc_buf(struct rknpu_gem_object *rknpu_obj) in rknpu_gem_alloc_buf() argument
132 struct drm_device *drm = rknpu_obj->base.dev; in rknpu_gem_alloc_buf()
140 if (rknpu_obj->dma_addr) { in rknpu_gem_alloc_buf()
145 rknpu_obj->dma_attrs = 0; in rknpu_gem_alloc_buf()
152 if (!(rknpu_obj->flags & RKNPU_MEM_NON_CONTIGUOUS)) in rknpu_gem_alloc_buf()
153 rknpu_obj->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS; in rknpu_gem_alloc_buf()
156 if (rknpu_obj->flags & RKNPU_MEM_CACHEABLE) { in rknpu_gem_alloc_buf()
158 rknpu_obj->dma_attrs |= DMA_ATTR_NON_CONSISTENT; in rknpu_gem_alloc_buf()
161 rknpu_obj->dma_attrs |= DMA_ATTR_SYS_CACHE_ONLY; in rknpu_gem_alloc_buf()
163 } else if (rknpu_obj->flags & RKNPU_MEM_WRITE_COMBINE) { in rknpu_gem_alloc_buf()
164 rknpu_obj->dma_attrs |= DMA_ATTR_WRITE_COMBINE; in rknpu_gem_alloc_buf()
167 if (!(rknpu_obj->flags & RKNPU_MEM_KERNEL_MAPPING)) in rknpu_gem_alloc_buf()
168 rknpu_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING; in rknpu_gem_alloc_buf()
171 if (!(rknpu_obj->flags & RKNPU_MEM_ZEROING)) in rknpu_gem_alloc_buf()
172 rknpu_obj->dma_attrs |= DMA_ATTR_SKIP_ZEROING; in rknpu_gem_alloc_buf()
176 if ((rknpu_obj->flags & RKNPU_MEM_NON_CONTIGUOUS) && in rknpu_gem_alloc_buf()
178 return rknpu_gem_get_pages(rknpu_obj); in rknpu_gem_alloc_buf()
182 if (rknpu_obj->flags & RKNPU_MEM_ZEROING) in rknpu_gem_alloc_buf()
187 (rknpu_obj->flags & RKNPU_MEM_DMA32)) { in rknpu_gem_alloc_buf()
192 nr_pages = rknpu_obj->size >> PAGE_SHIFT; in rknpu_gem_alloc_buf()
194 rknpu_obj->pages = rknpu_gem_alloc_page(nr_pages); in rknpu_gem_alloc_buf()
195 if (!rknpu_obj->pages) { in rknpu_gem_alloc_buf()
200 rknpu_obj->cookie = in rknpu_gem_alloc_buf()
201 dma_alloc_attrs(drm->dev, rknpu_obj->size, &rknpu_obj->dma_addr, in rknpu_gem_alloc_buf()
202 gfp_mask, rknpu_obj->dma_attrs); in rknpu_gem_alloc_buf()
203 if (!rknpu_obj->cookie) { in rknpu_gem_alloc_buf()
208 if (!(rknpu_obj->flags & RKNPU_MEM_NON_CONTIGUOUS) && in rknpu_gem_alloc_buf()
213 rknpu_obj->size); in rknpu_gem_alloc_buf()
214 rknpu_obj->dma_attrs &= ~DMA_ATTR_FORCE_CONTIGUOUS; in rknpu_gem_alloc_buf()
215 rknpu_obj->flags |= RKNPU_MEM_NON_CONTIGUOUS; in rknpu_gem_alloc_buf()
216 rknpu_obj->cookie = in rknpu_gem_alloc_buf()
217 dma_alloc_attrs(drm->dev, rknpu_obj->size, in rknpu_gem_alloc_buf()
218 &rknpu_obj->dma_addr, gfp_mask, in rknpu_gem_alloc_buf()
219 rknpu_obj->dma_attrs); in rknpu_gem_alloc_buf()
220 if (!rknpu_obj->cookie) { in rknpu_gem_alloc_buf()
224 rknpu_obj->size); in rknpu_gem_alloc_buf()
230 rknpu_obj->size); in rknpu_gem_alloc_buf()
235 if (rknpu_obj->flags & RKNPU_MEM_KERNEL_MAPPING) in rknpu_gem_alloc_buf()
236 rknpu_obj->kv_addr = rknpu_obj->cookie; in rknpu_gem_alloc_buf()
244 ret = dma_get_sgtable_attrs(drm->dev, sgt, rknpu_obj->cookie, in rknpu_gem_alloc_buf()
245 rknpu_obj->dma_addr, rknpu_obj->size, in rknpu_gem_alloc_buf()
246 rknpu_obj->dma_attrs); in rknpu_gem_alloc_buf()
259 ret = drm_prime_sg_to_page_addr_arrays(sgt, rknpu_obj->pages, NULL, in rknpu_gem_alloc_buf()
262 ret = drm_prime_sg_to_page_array(sgt, rknpu_obj->pages, nr_pages); in rknpu_gem_alloc_buf()
270 rknpu_obj->sgt = sgt; in rknpu_gem_alloc_buf()
279 dma_free_attrs(drm->dev, rknpu_obj->size, rknpu_obj->cookie, in rknpu_gem_alloc_buf()
280 rknpu_obj->dma_addr, rknpu_obj->dma_attrs); in rknpu_gem_alloc_buf()
282 rknpu_gem_free_page(rknpu_obj->pages); in rknpu_gem_alloc_buf()
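
rknpu_gem_alloc_buf() translates the RKNPU_MEM_* flags into DMA attributes before calling dma_alloc_attrs(), and on failure of a contiguous allocation retries without DMA_ATTR_FORCE_CONTIGUOUS (lines 214 to 219). Note that DMA_ATTR_NON_CONSISTENT has since been removed from mainline, and DMA_ATTR_SYS_CACHE_ONLY and DMA_ATTR_SKIP_ZEROING appear to be vendor-kernel attributes. A sketch of the mainline-portable core, with the DEMO_* flag bits invented here:

```c
#include <linux/bits.h>
#include <linux/dma-mapping.h>

/* Hypothetical flag bits mirroring the RKNPU_MEM_* flags above. */
#define DEMO_MEM_NON_CONTIGUOUS	BIT(0)
#define DEMO_MEM_KERNEL_MAPPING	BIT(1)

static void *demo_alloc(struct device *dev, size_t size, u32 flags,
			dma_addr_t *dma_addr, unsigned long *attrs_out)
{
	unsigned long attrs = 0;
	void *cookie;

	if (!(flags & DEMO_MEM_NON_CONTIGUOUS))
		attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
	/* Without a kernel mapping, the return value is an opaque
	 * cookie rather than a usable virtual address. */
	if (!(flags & DEMO_MEM_KERNEL_MAPPING))
		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	cookie = dma_alloc_attrs(dev, size, dma_addr, GFP_KERNEL, attrs);
	if (!cookie && (attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
		/* Contiguous allocation failed: drop the requirement
		 * and retry, as the driver's fallback does. */
		attrs &= ~DMA_ATTR_FORCE_CONTIGUOUS;
		cookie = dma_alloc_attrs(dev, size, dma_addr, GFP_KERNEL,
					 attrs);
	}

	*attrs_out = attrs;
	return cookie;
}
```

The attrs value must be stored (as rknpu_obj->dma_attrs is) because dma_free_attrs() and dma_mmap_attrs() need the same attributes the buffer was allocated with.
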
287 static void rknpu_gem_free_buf(struct rknpu_gem_object *rknpu_obj) in rknpu_gem_free_buf() argument
289 struct drm_device *drm = rknpu_obj->base.dev; in rknpu_gem_free_buf()
294 if (!rknpu_obj->dma_addr) { in rknpu_gem_free_buf()
300 if ((rknpu_obj->flags & RKNPU_MEM_NON_CONTIGUOUS) && in rknpu_gem_free_buf()
302 rknpu_gem_put_pages(rknpu_obj); in rknpu_gem_free_buf()
307 sg_free_table(rknpu_obj->sgt); in rknpu_gem_free_buf()
308 kfree(rknpu_obj->sgt); in rknpu_gem_free_buf()
310 dma_free_attrs(drm->dev, rknpu_obj->size, rknpu_obj->cookie, in rknpu_gem_free_buf()
311 rknpu_obj->dma_addr, rknpu_obj->dma_attrs); in rknpu_gem_free_buf()
313 rknpu_gem_free_page(rknpu_obj->pages); in rknpu_gem_free_buf()
315 rknpu_obj->dma_addr = 0; in rknpu_gem_free_buf()
367 struct rknpu_gem_object *rknpu_obj = NULL; in rknpu_gem_init() local
372 rknpu_obj = kzalloc(sizeof(*rknpu_obj), GFP_KERNEL); in rknpu_gem_init()
373 if (!rknpu_obj) in rknpu_gem_init()
376 obj = &rknpu_obj->base; in rknpu_gem_init()
384 kfree(rknpu_obj); in rknpu_gem_init()
388 rknpu_obj->size = rknpu_obj->base.size; in rknpu_gem_init()
392 if (rknpu_obj->flags & RKNPU_MEM_ZEROING) in rknpu_gem_init()
397 (rknpu_obj->flags & RKNPU_MEM_DMA32)) { in rknpu_gem_init()
404 return rknpu_obj; in rknpu_gem_init()
407 static void rknpu_gem_release(struct rknpu_gem_object *rknpu_obj) in rknpu_gem_release() argument
410 drm_gem_object_release(&rknpu_obj->base); in rknpu_gem_release()
411 kfree(rknpu_obj); in rknpu_gem_release()
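
rknpu_gem_init() pairs a kzalloc'd wrapper with drm_gem_object_init(), which creates the shmem file that later backs drm_gem_get_pages(); rknpu_gem_release() undoes both steps. A sketch under the same demo_obj stand-in:

```c
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <drm/drm_gem.h>

struct demo_obj {	/* stand-in for struct rknpu_gem_object */
	struct drm_gem_object base;
	unsigned long size;
};

static struct demo_obj *demo_init(struct drm_device *drm, size_t size)
{
	struct demo_obj *o;
	int ret;

	o = kzalloc(sizeof(*o), GFP_KERNEL);
	if (!o)
		return ERR_PTR(-ENOMEM);

	/* drm_gem_object_init() creates the shmem file backing the
	 * object; the size must be page-aligned first. */
	ret = drm_gem_object_init(drm, &o->base, PAGE_ALIGN(size));
	if (ret < 0) {
		kfree(o);
		return ERR_PTR(ret);
	}

	o->size = o->base.size;	/* as at line 388 above */
	return o;
}

static void demo_release(struct demo_obj *o)
{
	drm_gem_object_release(&o->base);	/* drops the shmem file */
	kfree(o);
}
```
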
414 static int rknpu_gem_alloc_buf_with_cache(struct rknpu_gem_object *rknpu_obj, in rknpu_gem_alloc_buf_with_cache() argument
417 struct drm_device *drm = rknpu_obj->base.dev; in rknpu_gem_alloc_buf_with_cache()
435 cache_offset = rknpu_obj->sram_obj->range_start * in rknpu_gem_alloc_buf_with_cache()
437 cache_size = rknpu_obj->sram_size; in rknpu_gem_alloc_buf_with_cache()
442 cache_size = rknpu_obj->nbuf_size; in rknpu_gem_alloc_buf_with_cache()
458 rknpu_obj->iova_size = iova_align(iovad, cache_size + rknpu_obj->size); in rknpu_gem_alloc_buf_with_cache()
459 rknpu_obj->iova_start = rknpu_iommu_dma_alloc_iova( in rknpu_gem_alloc_buf_with_cache()
460 domain, rknpu_obj->iova_size, dma_get_mask(drm->dev), drm->dev); in rknpu_gem_alloc_buf_with_cache()
461 if (!rknpu_obj->iova_start) { in rknpu_gem_alloc_buf_with_cache()
467 &rknpu_obj->iova_start, rknpu_obj->iova_size); in rknpu_gem_alloc_buf_with_cache()
493 ret = iommu_map(domain, rknpu_obj->iova_start, in rknpu_gem_alloc_buf_with_cache()
501 rknpu_obj->dma_addr = rknpu_obj->iova_start; in rknpu_gem_alloc_buf_with_cache()
503 if (rknpu_obj->size == 0) { in rknpu_gem_alloc_buf_with_cache()
508 rknpu_obj->pages = drm_gem_get_pages(&rknpu_obj->base); in rknpu_gem_alloc_buf_with_cache()
509 if (IS_ERR(rknpu_obj->pages)) { in rknpu_gem_alloc_buf_with_cache()
510 ret = PTR_ERR(rknpu_obj->pages); in rknpu_gem_alloc_buf_with_cache()
515 rknpu_obj->num_pages = rknpu_obj->size >> PAGE_SHIFT; in rknpu_gem_alloc_buf_with_cache()
518 rknpu_obj->sgt = drm_prime_pages_to_sg(drm, rknpu_obj->pages, in rknpu_gem_alloc_buf_with_cache()
519 rknpu_obj->num_pages); in rknpu_gem_alloc_buf_with_cache()
521 rknpu_obj->sgt = in rknpu_gem_alloc_buf_with_cache()
522 drm_prime_pages_to_sg(rknpu_obj->pages, rknpu_obj->num_pages); in rknpu_gem_alloc_buf_with_cache()
524 if (IS_ERR(rknpu_obj->sgt)) { in rknpu_gem_alloc_buf_with_cache()
525 ret = PTR_ERR(rknpu_obj->sgt); in rknpu_gem_alloc_buf_with_cache()
530 length = rknpu_obj->size; in rknpu_gem_alloc_buf_with_cache()
531 offset = rknpu_obj->iova_start + cache_size; in rknpu_gem_alloc_buf_with_cache()
533 for_each_sg(rknpu_obj->sgt->sgl, s, rknpu_obj->sgt->nents, i) { in rknpu_gem_alloc_buf_with_cache()
550 LOG_INFO("allocate size: %lu with cache size: %lu\n", rknpu_obj->size, in rknpu_gem_alloc_buf_with_cache()
556 iommu_unmap(domain, rknpu_obj->iova_start + cache_size, in rknpu_gem_alloc_buf_with_cache()
557 rknpu_obj->size - length); in rknpu_gem_alloc_buf_with_cache()
558 sg_free_table(rknpu_obj->sgt); in rknpu_gem_alloc_buf_with_cache()
559 kfree(rknpu_obj->sgt); in rknpu_gem_alloc_buf_with_cache()
562 drm_gem_put_pages(&rknpu_obj->base, rknpu_obj->pages, false, false); in rknpu_gem_alloc_buf_with_cache()
565 iommu_unmap(domain, rknpu_obj->iova_start, cache_size); in rknpu_gem_alloc_buf_with_cache()
569 rknpu_obj->iova_start, rknpu_obj->iova_size); in rknpu_gem_alloc_buf_with_cache()
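
rknpu_gem_alloc_buf_with_cache() carves a single IOVA window large enough for the SRAM or NBUF cache plus the DDR buffer, maps the cache region at the front, then maps the shmem pages contiguously behind it so the NPU sees one linear address range. The sketch below assumes the IOVA reservation has already been made (the driver uses its own rknpu_iommu_dma_alloc_iova() helper, which is not a mainline API) and uses the pre-v6.3 iommu_map() signature (newer kernels add a gfp_t argument):

```c
#include <linux/iommu.h>
#include <linux/minmax.h>
#include <linux/scatterlist.h>

static int demo_map_with_cache(struct iommu_domain *domain,
			       unsigned long iova_start,
			       phys_addr_t cache_pa, size_t cache_size,
			       struct sg_table *sgt, size_t buf_size)
{
	unsigned long offset = iova_start + cache_size;
	size_t length = buf_size;
	struct scatterlist *s;
	int ret, i;

	/* Map the on-chip SRAM/NBUF window at the front of the range. */
	ret = iommu_map(domain, iova_start, cache_pa, cache_size,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	/* Map the discontiguous pages IOVA-contiguously behind it. */
	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		size_t len = min_t(size_t, s->length, length);

		ret = iommu_map(domain, offset, sg_phys(s), len,
				IOMMU_READ | IOMMU_WRITE);
		if (ret)
			goto unmap;
		offset += len;
		length -= len;
		if (!length)
			break;
	}
	return 0;

unmap:
	/* Tear down whatever was mapped, mirroring lines 556-565. */
	if (buf_size != length)
		iommu_unmap(domain, iova_start + cache_size,
			    buf_size - length);
	iommu_unmap(domain, iova_start, cache_size);
	return ret;
}
```

rknpu_gem_free_buf_with_cache() (next cluster) unmaps in the same order, cache window first and then the buffer tail, before releasing the IOVA reservation, the page array, and the sg table.
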
574 static void rknpu_gem_free_buf_with_cache(struct rknpu_gem_object *rknpu_obj, in rknpu_gem_free_buf_with_cache() argument
577 struct drm_device *drm = rknpu_obj->base.dev; in rknpu_gem_free_buf_with_cache()
584 cache_size = rknpu_obj->sram_size; in rknpu_gem_free_buf_with_cache()
587 cache_size = rknpu_obj->nbuf_size; in rknpu_gem_free_buf_with_cache()
596 iommu_unmap(domain, rknpu_obj->iova_start, cache_size); in rknpu_gem_free_buf_with_cache()
597 if (rknpu_obj->size > 0) in rknpu_gem_free_buf_with_cache()
598 iommu_unmap(domain, rknpu_obj->iova_start + cache_size, in rknpu_gem_free_buf_with_cache()
599 rknpu_obj->size); in rknpu_gem_free_buf_with_cache()
601 rknpu_obj->iova_start, in rknpu_gem_free_buf_with_cache()
602 rknpu_obj->iova_size); in rknpu_gem_free_buf_with_cache()
605 if (rknpu_obj->pages) in rknpu_gem_free_buf_with_cache()
606 drm_gem_put_pages(&rknpu_obj->base, rknpu_obj->pages, true, in rknpu_gem_free_buf_with_cache()
609 if (rknpu_obj->sgt != NULL) { in rknpu_gem_free_buf_with_cache()
610 sg_free_table(rknpu_obj->sgt); in rknpu_gem_free_buf_with_cache()
611 kfree(rknpu_obj->sgt); in rknpu_gem_free_buf_with_cache()
621 struct rknpu_gem_object *rknpu_obj = NULL; in rknpu_gem_object_create() local
650 rknpu_obj = rknpu_gem_init(drm, remain_ddr_size); in rknpu_gem_object_create()
651 if (IS_ERR(rknpu_obj)) in rknpu_gem_object_create()
652 return rknpu_obj; in rknpu_gem_object_create()
655 rknpu_obj->flags = flags; in rknpu_gem_object_create()
666 &rknpu_obj->sram_obj); in rknpu_gem_object_create()
679 rknpu_obj->sram_size = real_sram_size; in rknpu_gem_object_create()
681 ret = rknpu_gem_alloc_buf_with_cache(rknpu_obj, in rknpu_gem_object_create()
692 rknpu_obj = rknpu_gem_init(drm, remain_ddr_size); in rknpu_gem_object_create()
693 if (IS_ERR(rknpu_obj)) in rknpu_gem_object_create()
694 return rknpu_obj; in rknpu_gem_object_create()
701 rknpu_obj->flags = flags; in rknpu_gem_object_create()
704 rknpu_obj->nbuf_size = nbuf_size; in rknpu_gem_object_create()
706 ret = rknpu_gem_alloc_buf_with_cache(rknpu_obj, in rknpu_gem_object_create()
715 rknpu_obj = rknpu_gem_init(drm, remain_ddr_size); in rknpu_gem_object_create()
716 if (IS_ERR(rknpu_obj)) in rknpu_gem_object_create()
717 return rknpu_obj; in rknpu_gem_object_create()
720 rknpu_obj->flags = flags; in rknpu_gem_object_create()
722 ret = rknpu_gem_alloc_buf(rknpu_obj); in rknpu_gem_object_create()
727 if (rknpu_obj) in rknpu_gem_object_create()
730 &rknpu_obj->dma_addr, rknpu_obj->cookie, in rknpu_gem_object_create()
731 rknpu_obj->size, rknpu_obj->sram_size, in rknpu_gem_object_create()
732 rknpu_obj->nbuf_size, rknpu_obj->dma_attrs, in rknpu_gem_object_create()
733 rknpu_obj->flags); in rknpu_gem_object_create()
735 return rknpu_obj; in rknpu_gem_object_create()
739 rknpu_obj->sram_obj != NULL) in rknpu_gem_object_create()
740 rknpu_mm_free(rknpu_dev->sram_mm, rknpu_obj->sram_obj); in rknpu_gem_object_create()
743 rknpu_gem_release(rknpu_obj); in rknpu_gem_object_create()
748 void rknpu_gem_object_destroy(struct rknpu_gem_object *rknpu_obj) in rknpu_gem_object_destroy() argument
750 struct drm_gem_object *obj = &rknpu_obj->base; in rknpu_gem_object_destroy()
754 &rknpu_obj->dma_addr, rknpu_obj->cookie, rknpu_obj->size, in rknpu_gem_object_destroy()
755 rknpu_obj->dma_attrs, rknpu_obj->flags, obj->handle_count); in rknpu_gem_object_destroy()
764 drm_prime_gem_destroy(obj, rknpu_obj->sgt); in rknpu_gem_object_destroy()
765 rknpu_gem_free_page(rknpu_obj->pages); in rknpu_gem_object_destroy()
768 rknpu_obj->sram_size > 0) { in rknpu_gem_object_destroy()
771 if (rknpu_obj->sram_obj != NULL) in rknpu_gem_object_destroy()
773 rknpu_obj->sram_obj); in rknpu_gem_object_destroy()
774 rknpu_gem_free_buf_with_cache(rknpu_obj, in rknpu_gem_object_destroy()
777 rknpu_obj->nbuf_size > 0) { in rknpu_gem_object_destroy()
778 rknpu_gem_free_buf_with_cache(rknpu_obj, in rknpu_gem_object_destroy()
781 rknpu_gem_free_buf(rknpu_obj); in rknpu_gem_object_destroy()
785 rknpu_gem_release(rknpu_obj); in rknpu_gem_object_destroy()
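
rknpu_gem_object_destroy() dispatches on how the object came to be: PRIME imports are detached with drm_prime_gem_destroy() rather than freed, SRAM/NBUF-backed objects take rknpu_gem_free_buf_with_cache(), and plain allocations take rknpu_gem_free_buf(). A condensed sketch of that dispatch; demo_free_buf() is a hypothetical stand-in for the native free path, and the kvfree() of the page array is an assumption about what rknpu_gem_free_page() does:

```c
#include <linux/mm.h>
#include <linux/slab.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

struct demo_obj {	/* stand-in for struct rknpu_gem_object */
	struct drm_gem_object base;
	struct page **pages;
	struct sg_table *sgt;
};

static void demo_free_buf(struct demo_obj *o);	/* hypothetical native path */

static void demo_destroy(struct demo_obj *o)
{
	struct drm_gem_object *obj = &o->base;

	if (obj->import_attach) {
		/* Imported dma-buf: detach and unmap instead of freeing
		 * memory the exporter owns; only the bookkeeping page
		 * array belongs to this driver. */
		drm_prime_gem_destroy(obj, o->sgt);
		kvfree(o->pages);
	} else {
		demo_free_buf(o);
	}

	drm_gem_object_release(obj);
	kfree(o);
}
```
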
792 struct rknpu_gem_object *rknpu_obj = NULL; in rknpu_gem_create_ioctl() local
795 rknpu_obj = rknpu_gem_object_find(file_priv, args->handle); in rknpu_gem_create_ioctl()
796 if (!rknpu_obj) { in rknpu_gem_create_ioctl()
797 rknpu_obj = rknpu_gem_object_create( in rknpu_gem_create_ioctl()
799 if (IS_ERR(rknpu_obj)) in rknpu_gem_create_ioctl()
800 return PTR_ERR(rknpu_obj); in rknpu_gem_create_ioctl()
802 ret = rknpu_gem_handle_create(&rknpu_obj->base, file_priv, in rknpu_gem_create_ioctl()
805 rknpu_gem_object_destroy(rknpu_obj); in rknpu_gem_create_ioctl()
812 args->size = rknpu_obj->size; in rknpu_gem_create_ioctl()
813 args->sram_size = rknpu_obj->sram_size; in rknpu_gem_create_ioctl()
814 args->obj_addr = (__u64)(uintptr_t)rknpu_obj; in rknpu_gem_create_ioctl()
815 args->dma_addr = rknpu_obj->dma_addr; in rknpu_gem_create_ioctl()
837 struct rknpu_gem_object *rknpu_obj = NULL; in rknpu_gem_destroy_ioctl() local
840 rknpu_obj = rknpu_gem_object_find(file_priv, args->handle); in rknpu_gem_destroy_ioctl()
841 if (!rknpu_obj) in rknpu_gem_destroy_ioctl()
886 static int rknpu_gem_mmap_pages(struct rknpu_gem_object *rknpu_obj, in rknpu_gem_mmap_pages() argument
889 struct drm_device *drm = rknpu_obj->base.dev; in rknpu_gem_mmap_pages()
894 ret = __vm_map_pages(vma, rknpu_obj->pages, rknpu_obj->num_pages, in rknpu_gem_mmap_pages()
904 static int rknpu_gem_mmap_cache(struct rknpu_gem_object *rknpu_obj, in rknpu_gem_mmap_cache() argument
908 struct drm_device *drm = rknpu_obj->base.dev; in rknpu_gem_mmap_cache()
924 cache_offset = rknpu_obj->sram_obj->range_start * in rknpu_gem_mmap_cache()
926 cache_size = rknpu_obj->sram_size; in rknpu_gem_mmap_cache()
931 cache_size = rknpu_obj->nbuf_size; in rknpu_gem_mmap_cache()
956 if (rknpu_obj->size == 0) in rknpu_gem_mmap_cache()
964 rknpu_obj->pages[i]); in rknpu_gem_mmap_cache()
973 static int rknpu_gem_mmap_buffer(struct rknpu_gem_object *rknpu_obj, in rknpu_gem_mmap_buffer() argument
976 struct drm_device *drm = rknpu_obj->base.dev; in rknpu_gem_mmap_buffer()
994 if (vm_size > rknpu_obj->size) in rknpu_gem_mmap_buffer()
997 if (rknpu_obj->sram_size > 0) in rknpu_gem_mmap_buffer()
998 return rknpu_gem_mmap_cache(rknpu_obj, vma, RKNPU_CACHE_SRAM); in rknpu_gem_mmap_buffer()
999 else if (rknpu_obj->nbuf_size > 0) in rknpu_gem_mmap_buffer()
1000 return rknpu_gem_mmap_cache(rknpu_obj, vma, RKNPU_CACHE_NBUF); in rknpu_gem_mmap_buffer()
1003 if ((rknpu_obj->flags & RKNPU_MEM_NON_CONTIGUOUS) && in rknpu_gem_mmap_buffer()
1005 return rknpu_gem_mmap_pages(rknpu_obj, vma); in rknpu_gem_mmap_buffer()
1009 ret = dma_mmap_attrs(drm->dev, vma, rknpu_obj->cookie, in rknpu_gem_mmap_buffer()
1010 rknpu_obj->dma_addr, rknpu_obj->size, in rknpu_gem_mmap_buffer()
1011 rknpu_obj->dma_attrs); in rknpu_gem_mmap_buffer()
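
rknpu_gem_mmap_buffer() rejects VMAs larger than the allocation, then picks a mapping strategy: SRAM/NBUF-backed objects go through rknpu_gem_mmap_cache(), discontiguous IOMMU-backed objects map their page array directly, and everything else defers to dma_mmap_attrs(). A sketch of the two generic branches, using the exported vm_map_pages() in place of the driver's __vm_map_pages() wrapper (the double-underscore variant is internal to mm/memory.c):

```c
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <drm/drm_gem.h>

struct demo_obj {	/* stand-in for struct rknpu_gem_object */
	struct drm_gem_object base;
	struct page **pages;
	unsigned long num_pages;
	void *cookie;
	dma_addr_t dma_addr;
	unsigned long dma_attrs;
	bool discontiguous;
};

static int demo_mmap(struct device *dma_dev, struct vm_area_struct *vma,
		     struct demo_obj *o)
{
	unsigned long vm_size = vma->vm_end - vma->vm_start;

	/* Refuse mappings larger than the allocation (line 994). */
	if (vm_size > o->base.size)
		return -EINVAL;

	/* Discontiguous, IOMMU-backed objects: insert the page array
	 * starting at the VMA base. */
	if (o->discontiguous)
		return vm_map_pages(vma, o->pages, o->num_pages);

	/* Otherwise let the DMA layer build the mapping, honouring the
	 * attrs the buffer was allocated with. */
	return dma_mmap_attrs(dma_dev, vma, o->cookie, o->dma_addr,
			      o->base.size, o->dma_attrs);
}
```
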
1029 struct rknpu_gem_object *rknpu_obj = NULL; in rknpu_gem_dumb_create() local
1046 rknpu_obj = rknpu_gem_object_create(drm, flags, args->size, 0); in rknpu_gem_dumb_create()
1047 if (IS_ERR(rknpu_obj)) { in rknpu_gem_dumb_create()
1049 return PTR_ERR(rknpu_obj); in rknpu_gem_dumb_create()
1052 ret = rknpu_gem_handle_create(&rknpu_obj->base, file_priv, in rknpu_gem_dumb_create()
1055 rknpu_gem_object_destroy(rknpu_obj); in rknpu_gem_dumb_create()
1067 struct rknpu_gem_object *rknpu_obj = NULL; in rknpu_gem_dumb_map_offset() local
1071 rknpu_obj = rknpu_gem_object_find(file_priv, handle); in rknpu_gem_dumb_map_offset()
1072 if (!rknpu_obj) in rknpu_gem_dumb_map_offset()
1076 obj = &rknpu_obj->base; in rknpu_gem_dumb_map_offset()
1095 struct rknpu_gem_object *rknpu_obj = to_rknpu_obj(obj); in rknpu_gem_fault() local
1096 struct drm_device *drm = rknpu_obj->base.dev; in rknpu_gem_fault()
1102 if (page_offset >= (rknpu_obj->size >> PAGE_SHIFT)) { in rknpu_gem_fault()
1107 pfn = page_to_pfn(rknpu_obj->pages[page_offset]); in rknpu_gem_fault()
1116 struct rknpu_gem_object *rknpu_obj = to_rknpu_obj(obj); in rknpu_gem_fault() local
1117 struct drm_device *drm = rknpu_obj->base.dev; in rknpu_gem_fault()
1124 if (page_offset >= (rknpu_obj->size >> PAGE_SHIFT)) { in rknpu_gem_fault()
1130 pfn = page_to_pfn(rknpu_obj->pages[page_offset]); in rknpu_gem_fault()
1149 struct rknpu_gem_object *rknpu_obj = to_rknpu_obj(obj); in rknpu_gem_fault() local
1150 struct drm_device *drm = rknpu_obj->base.dev; in rknpu_gem_fault()
1158 if (page_offset >= (rknpu_obj->size >> PAGE_SHIFT)) { in rknpu_gem_fault()
1164 pfn = page_to_pfn(rknpu_obj->pages[page_offset]); in rknpu_gem_fault()
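
The three rknpu_gem_fault() variants exist because the fault-handler interface changed across kernel releases: struct vm_fault gained its own vma pointer, and vm_fault_t with vmf_insert_pfn() replaced raw errno returns. A sketch of the modern form, assuming vm_private_data was pointed at the object during mmap, as drm_gem_mmap() arranges:

```c
#include <linux/mm.h>

struct demo_obj {	/* stand-in for struct rknpu_gem_object */
	struct page **pages;
	unsigned long num_pages;
};

static vm_fault_t demo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct demo_obj *o = vma->vm_private_data;	/* set at mmap time */
	pgoff_t page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/* Faults beyond the buffer become SIGBUS, as in the fragments. */
	if (page_offset >= o->num_pages)
		return VM_FAULT_SIGBUS;

	/* vmf_insert_pfn() translates errno-style failures into
	 * VM_FAULT_* codes itself, so it can be returned directly. */
	return vmf_insert_pfn(vma, vmf->address,
			      page_to_pfn(o->pages[page_offset]));
}
```
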
1184 struct rknpu_gem_object *rknpu_obj = to_rknpu_obj(obj); in rknpu_gem_mmap_obj() local
1187 LOG_DEBUG("flags: %#x\n", rknpu_obj->flags); in rknpu_gem_mmap_obj()
1190 if (rknpu_obj->flags & RKNPU_MEM_CACHEABLE) { in rknpu_gem_mmap_obj()
1192 } else if (rknpu_obj->flags & RKNPU_MEM_WRITE_COMBINE) { in rknpu_gem_mmap_obj()
1200 ret = rknpu_gem_mmap_buffer(rknpu_obj, vma); in rknpu_gem_mmap_obj()
1243 struct rknpu_gem_object *rknpu_obj = to_rknpu_obj(obj); in rknpu_gem_prime_get_sg_table() local
1246 npages = rknpu_obj->size >> PAGE_SHIFT; in rknpu_gem_prime_get_sg_table()
1249 return drm_prime_pages_to_sg(obj->dev, rknpu_obj->pages, npages); in rknpu_gem_prime_get_sg_table()
1251 return drm_prime_pages_to_sg(rknpu_obj->pages, npages); in rknpu_gem_prime_get_sg_table()
1260 struct rknpu_gem_object *rknpu_obj = NULL; in rknpu_gem_prime_import_sg_table() local
1264 rknpu_obj = rknpu_gem_init(dev, attach->dmabuf->size); in rknpu_gem_prime_import_sg_table()
1265 if (IS_ERR(rknpu_obj)) { in rknpu_gem_prime_import_sg_table()
1266 ret = PTR_ERR(rknpu_obj); in rknpu_gem_prime_import_sg_table()
1270 rknpu_obj->dma_addr = sg_dma_address(sgt->sgl); in rknpu_gem_prime_import_sg_table()
1272 npages = rknpu_obj->size >> PAGE_SHIFT; in rknpu_gem_prime_import_sg_table()
1273 rknpu_obj->pages = rknpu_gem_alloc_page(npages); in rknpu_gem_prime_import_sg_table()
1274 if (!rknpu_obj->pages) { in rknpu_gem_prime_import_sg_table()
1280 ret = drm_prime_sg_to_page_addr_arrays(sgt, rknpu_obj->pages, NULL, in rknpu_gem_prime_import_sg_table()
1283 ret = drm_prime_sg_to_page_array(sgt, rknpu_obj->pages, npages); in rknpu_gem_prime_import_sg_table()
1288 rknpu_obj->sgt = sgt; in rknpu_gem_prime_import_sg_table()
1292 rknpu_obj->flags |= RKNPU_MEM_CONTIGUOUS; in rknpu_gem_prime_import_sg_table()
1300 rknpu_obj->flags |= RKNPU_MEM_NON_CONTIGUOUS; in rknpu_gem_prime_import_sg_table()
1303 return &rknpu_obj->base; in rknpu_gem_prime_import_sg_table()
1306 rknpu_gem_free_page(rknpu_obj->pages); in rknpu_gem_prime_import_sg_table()
1308 rknpu_gem_release(rknpu_obj); in rknpu_gem_prime_import_sg_table()
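
rknpu_gem_prime_import_sg_table() keeps the exporter's sg table, records the first segment's DMA address, and rebuilds a page array from the table; the paired call sites reflect the v5.12 split of drm_prime_sg_to_page_addr_arrays() into drm_prime_sg_to_page_array() and drm_prime_sg_to_dma_addr_array(). Whether the object is then flagged RKNPU_MEM_CONTIGUOUS or RKNPU_MEM_NON_CONTIGUOUS follows from the segment count. A sketch of that core:

```c
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <drm/drm_prime.h>

static int demo_import_pages(struct sg_table *sgt, struct page **pages,
			     unsigned int npages, dma_addr_t *dma_addr,
			     bool *contiguous)
{
	int ret;

	/* The importer-visible DMA address is the first segment's. */
	*dma_addr = sg_dma_address(sgt->sgl);

	/* The array-import helper was split in v5.12, hence the two
	 * call sites above. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0)
	ret = drm_prime_sg_to_page_array(sgt, pages, npages);
#else
	ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL, npages);
#endif
	if (ret)
		return ret;

	/* A single DMA segment covering the whole buffer means the
	 * exporter handed over contiguous memory. */
	*contiguous = (sgt->nents == 1);
	return 0;
}
```
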
1315 struct rknpu_gem_object *rknpu_obj = to_rknpu_obj(obj); in rknpu_gem_prime_vmap() local
1317 if (!rknpu_obj->pages) in rknpu_gem_prime_vmap()
1320 return vmap(rknpu_obj->pages, rknpu_obj->num_pages, VM_MAP, in rknpu_gem_prime_vmap()
1331 struct rknpu_gem_object *rknpu_obj = to_rknpu_obj(obj); in rknpu_gem_prime_vmap() local
1333 if (!rknpu_obj->pages) in rknpu_gem_prime_vmap()
1336 map->vaddr = vmap(rknpu_obj->pages, rknpu_obj->num_pages, VM_MAP, in rknpu_gem_prime_vmap()
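
The two rknpu_gem_prime_vmap() variants track the DRM vmap-callback change: older kernels return the kernel address directly, newer ones fill in a map structure (struct dma_buf_map from roughly v5.11, renamed struct iosys_map in v5.18). A sketch of the dma_buf_map-era form, under the same demo_obj stand-in:

```c
#include <linux/dma-buf-map.h>	/* <linux/iosys-map.h> from v5.18 on */
#include <linux/mm.h>
#include <linux/vmalloc.h>

struct demo_obj {	/* stand-in for struct rknpu_gem_object */
	struct page **pages;
	unsigned long num_pages;
};

static int demo_prime_vmap(struct demo_obj *o, struct dma_buf_map *map)
{
	void *vaddr;

	/* Imported contiguous buffers may carry no page array. */
	if (!o->pages)
		return -EINVAL;

	vaddr = vmap(o->pages, o->num_pages, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	dma_buf_map_set_vaddr(map, vaddr);
	return 0;
}
```
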
1359 static int rknpu_cache_sync(struct rknpu_gem_object *rknpu_obj, in rknpu_cache_sync() argument
1364 struct drm_gem_object *obj = &rknpu_obj->base; in rknpu_cache_sync()
1373 cache_offset = rknpu_obj->sram_obj->range_start * in rknpu_cache_sync()
1375 cache_size = rknpu_obj->sram_size; in rknpu_cache_sync()
1380 cache_size = rknpu_obj->nbuf_size; in rknpu_cache_sync()
1414 struct rknpu_gem_object *rknpu_obj = NULL; in rknpu_gem_sync_ioctl() local
1422 rknpu_obj = (struct rknpu_gem_object *)(uintptr_t)args->obj_addr; in rknpu_gem_sync_ioctl()
1423 if (!rknpu_obj) in rknpu_gem_sync_ioctl()
1426 if (!(rknpu_obj->flags & RKNPU_MEM_CACHEABLE)) in rknpu_gem_sync_ioctl()
1429 if (!(rknpu_obj->flags & RKNPU_MEM_NON_CONTIGUOUS)) { in rknpu_gem_sync_ioctl()
1432 dev->dev, rknpu_obj->dma_addr, args->offset, in rknpu_gem_sync_ioctl()
1437 rknpu_obj->dma_addr, in rknpu_gem_sync_ioctl()
1447 rknpu_obj->sram_size > 0) { in rknpu_gem_sync_ioctl()
1448 rknpu_cache_sync(rknpu_obj, &length, &offset, in rknpu_gem_sync_ioctl()
1451 rknpu_obj->nbuf_size > 0) { in rknpu_gem_sync_ioctl()
1452 rknpu_cache_sync(rknpu_obj, &length, &offset, in rknpu_gem_sync_ioctl()
1456 for_each_sg(rknpu_obj->sgt->sgl, sg, rknpu_obj->sgt->nents, in rknpu_gem_sync_ioctl()
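
rknpu_gem_sync_ioctl() only acts on RKNPU_MEM_CACHEABLE buffers. A contiguous buffer can sync a sub-range in one call through the dma_sync_single_range_for_*() helpers; a scatter-gather buffer must instead walk the sg list (the for_each_sg at line 1456) and sync each segment that overlaps the requested window. A sketch of the contiguous case, with the DEMO_SYNC_* direction flags invented here in place of the driver's sync flags:

```c
#include <linux/bits.h>
#include <linux/dma-mapping.h>

/* Hypothetical direction flags standing in for the driver's own. */
#define DEMO_SYNC_TO_DEVICE	BIT(0)
#define DEMO_SYNC_FROM_DEVICE	BIT(1)

static void demo_sync_contiguous(struct device *dev, dma_addr_t dma_addr,
				 unsigned long offset, unsigned long length,
				 u32 dir)
{
	/* The *_range variants take the base DMA handle plus an offset,
	 * so only the requested window is flushed or invalidated. */
	if (dir & DEMO_SYNC_TO_DEVICE)
		dma_sync_single_range_for_device(dev, dma_addr, offset,
						 length, DMA_TO_DEVICE);
	if (dir & DEMO_SYNC_FROM_DEVICE)
		dma_sync_single_range_for_cpu(dev, dma_addr, offset,
					      length, DMA_FROM_DEVICE);
}
```
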