xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/rockchip/rockchip_drm_gem.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author:Mark Yao <mark.yao@rock-chips.com>
 */

#include <linux/dma-buf-cache.h>
#include <linux/iommu.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>

#include <linux/genalloc.h>
#include <linux/pagemap.h>
#include <linux/rockchip/rockchip_sip.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"

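/*
 * Default DRAM bank address-bit layout (first bank bit and bank bit mask),
 * refreshed from firmware by rockchip_gem_get_ddr_info().
 */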
static u32 bank_bit_first = 12;
static u32 bank_bit_mask = 0x7;

struct page_info {
	struct page *page;
	struct list_head list;
};

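/*
 * Number of bank buckets used when redistributing scattered pages; runs of
 * at least PG_ROUND physically contiguous pages are kept in place.
 */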
#define PG_ROUND       8

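/*
 * rockchip_gem_iommu_map - reserve an IOVA range from the private drm_mm
 * and map the object's sg table into it through the shared IOMMU domain.
 */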
static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t ret;

	mutex_lock(&private->mm_lock);
	ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
					 rk_obj->base.size, PAGE_SIZE,
					 0, 0);
	mutex_unlock(&private->mm_lock);

	if (ret < 0) {
		DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
		return ret;
	}

	rk_obj->dma_addr = rk_obj->mm.start;

	ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt,
				prot);
	if (ret < rk_obj->base.size) {
		DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
			  ret, rk_obj->base.size);
		ret = -ENOMEM;
		goto err_remove_node;
	}

	iommu_flush_iotlb_all(private->domain);

	rk_obj->size = ret;

	return 0;

err_remove_node:
	mutex_lock(&private->mm_lock);
	drm_mm_remove_node(&rk_obj->mm);
	mutex_unlock(&private->mm_lock);

	return ret;
}

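/*
 * rockchip_gem_iommu_unmap - tear down the mapping created by
 * rockchip_gem_iommu_map() and return the IOVA range to the drm_mm.
 */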
static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;

	iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);

	mutex_lock(&private->mm_lock);

	drm_mm_remove_node(&rk_obj->mm);

	mutex_unlock(&private->mm_lock);

	return 0;
}

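/* Drop any page_info entries still queued on the per-bank lists. */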
static void rockchip_gem_free_list(struct list_head lists[])
{
	struct page_info *info, *tmp_info;
	int i;

	for (i = 0; i < PG_ROUND; i++) {
		list_for_each_entry_safe(info, tmp_info, &lists[i], list) {
			list_del(&info->list);
			kfree(info);
		}
	}
}

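/*
 * Query the DRAM address map from the SiP firmware and cache the bank bit
 * layout used when reordering pages in rockchip_gem_get_pages().
 */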
void rockchip_gem_get_ddr_info(void)
{
	struct dram_addrmap_info *ddr_map_info;

	ddr_map_info = sip_smc_get_dram_map();
	if (ddr_map_info) {
		bank_bit_first = ddr_map_info->bank_bit_first;
		bank_bit_mask = ddr_map_info->bank_bit_mask;
	}
}

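/*
 * rockchip_gem_get_pages - back the object with shmem pages.
 *
 * Pages come from drm_gem_get_pages() and are then reordered: physically
 * contiguous runs of at least PG_ROUND pages are kept as-is, while the
 * remaining pages are bucketed by their DRAM bank index (derived from
 * bank_bit_first/bank_bit_mask) and emitted round-robin, so consecutive
 * pages of the buffer tend to land in different banks. The reordered page
 * array is then wrapped in an sg table and flushed for device access.
 */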
static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	int ret, i;
	struct scatterlist *s;
	unsigned int cur_page;
	struct page **pages, **dst_pages;
	int j;
	int n_pages;
	unsigned long chunk_pages;
	unsigned long remain;
	struct list_head lists[PG_ROUND];
	dma_addr_t phys;
	int end = 0;
	unsigned int bit_index;
	unsigned int block_index[PG_ROUND] = {0};
	struct page_info *info;
	unsigned int maximum;

	for (i = 0; i < PG_ROUND; i++)
		INIT_LIST_HEAD(&lists[i]);

	pages = drm_gem_get_pages(&rk_obj->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	rk_obj->pages = pages;

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	n_pages = rk_obj->num_pages;

	dst_pages = __vmalloc(sizeof(struct page *) * n_pages,
			GFP_KERNEL | __GFP_HIGHMEM);
	if (!dst_pages) {
		ret = -ENOMEM;
		goto err_put_pages;
	}

	DRM_DEBUG_KMS("bank_bit_first = 0x%x, bank_bit_mask = 0x%x\n",
		      bank_bit_first, bank_bit_mask);

	cur_page = 0;
	remain = n_pages;
	/* look for the end of the current chunk */
	while (remain) {
		for (j = cur_page + 1; j < n_pages; ++j) {
			if (page_to_pfn(pages[j]) !=
				page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		chunk_pages = j - cur_page;
		if (chunk_pages >= PG_ROUND) {
			for (i = 0; i < chunk_pages; i++)
				dst_pages[end + i] = pages[cur_page + i];
			end += chunk_pages;
		} else {
			for (i = 0; i < chunk_pages; i++) {
				info = kmalloc(sizeof(*info), GFP_KERNEL);
				if (!info) {
					ret = -ENOMEM;
					goto err_put_list;
				}

				INIT_LIST_HEAD(&info->list);
				info->page = pages[cur_page + i];
				phys = page_to_phys(info->page);
				bit_index = ((phys >> bank_bit_first) & bank_bit_mask) % PG_ROUND;
				list_add_tail(&info->list, &lists[bit_index]);
				block_index[bit_index]++;
			}
		}

		cur_page = j;
		remain -= chunk_pages;
	}

	maximum = block_index[0];
	for (i = 1; i < PG_ROUND; i++)
		maximum = max(maximum, block_index[i]);

	for (i = 0; i < maximum; i++) {
		for (j = 0; j < PG_ROUND; j++) {
			if (!list_empty(&lists[j])) {
				struct page_info *info;

				info = list_first_entry(&lists[j],
							struct page_info, list);
				dst_pages[end++] = info->page;
				list_del(&info->list);
				kfree(info);
			}
		}
	}

	DRM_DEBUG_KMS("%s, %d, end = %d, n_pages = %d\n", __func__, __LINE__,
			end, n_pages);
	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev,
					    dst_pages, rk_obj->num_pages);
	if (IS_ERR(rk_obj->sgt)) {
		ret = PTR_ERR(rk_obj->sgt);
		goto err_put_list;
	}

	rk_obj->pages = dst_pages;

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sgtable_sg(rk_obj->sgt, s, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE);

	kvfree(pages);

	return 0;

err_put_list:
	rockchip_gem_free_list(lists);
	kvfree(dst_pages);
err_put_pages:
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
	return ret;
}

static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
}

static inline void *drm_calloc_large(size_t nmemb, size_t size);
static inline void drm_free_large(void *ptr);
static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj);
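/*
 * rockchip_gem_alloc_dma - back the object with a contiguous (CMA) buffer
 * from the DMA API and build the matching sg table and page array so the
 * rest of the driver can treat it like a paged object.
 */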
static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;
	struct sg_table *sgt;
	int ret, i;
	struct scatterlist *s;

	rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
					 &rk_obj->dma_handle, GFP_KERNEL,
					 rk_obj->dma_attrs);
	if (!rk_obj->kvaddr) {
		DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size);
		return -ENOMEM;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto err_dma_free;
	}

	ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
				    rk_obj->dma_handle, obj->size,
				    rk_obj->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		goto err_sgt_free;
	}

	for_each_sg(sgt->sgl, s, sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	rk_obj->pages = drm_calloc_large(rk_obj->num_pages,
					 sizeof(*rk_obj->pages));
	if (!rk_obj->pages) {
		ret = -ENOMEM;
		DRM_ERROR("failed to allocate pages.\n");
		goto err_sg_table_free;
	}

	if (drm_prime_sg_to_page_addr_arrays(sgt, rk_obj->pages, NULL,
					     rk_obj->num_pages)) {
		DRM_ERROR("invalid sgtable.\n");
		ret = -EINVAL;
		goto err_page_free;
	}

	rk_obj->sgt = sgt;

	return 0;

err_page_free:
	drm_free_large(rk_obj->pages);
err_sg_table_free:
	sg_free_table(sgt);
err_sgt_free:
	kfree(sgt);
err_dma_free:
	dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr,
		       rk_obj->dma_handle, rk_obj->dma_attrs);

	return ret;
}

static inline void *drm_calloc_large(size_t nmemb, size_t size)
{
	if (size != 0 && nmemb > SIZE_MAX / size)
		return NULL;

	if (size * nmemb <= PAGE_SIZE)
		return kcalloc(nmemb, size, GFP_KERNEL);

	return __vmalloc(size * nmemb,
			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}

static inline void drm_free_large(void *ptr)
{
	kvfree(ptr);
}

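/*
 * rockchip_gem_alloc_secure - carve a contiguous range out of the secure
 * buffer gen_pool and describe it with a page array and sg table.
 */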
static int rockchip_gem_alloc_secure(struct rockchip_gem_object *rk_obj)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;
	unsigned long paddr;
	struct sg_table *sgt;
	int ret = 0, i;

	if (!private->secure_buffer_pool) {
		DRM_ERROR("No secure buffer pool found\n");
		return -ENOMEM;
	}

	paddr = gen_pool_alloc(private->secure_buffer_pool, rk_obj->base.size);
	if (!paddr) {
		DRM_ERROR("failed to allocate secure buffer\n");
		return -ENOMEM;
	}

	rk_obj->dma_handle = paddr;
	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	rk_obj->pages = drm_calloc_large(rk_obj->num_pages,
					 sizeof(*rk_obj->pages));
	if (!rk_obj->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		ret = -ENOMEM;
		goto err_buf_free;
	}

	i = 0;
	while (i < rk_obj->num_pages) {
		rk_obj->pages[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
		i++;
	}
	sgt = drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_free_pages;
	}

	rk_obj->sgt = sgt;

	return 0;

err_free_pages:
	drm_free_large(rk_obj->pages);
err_buf_free:
	gen_pool_free(private->secure_buffer_pool, paddr, rk_obj->base.size);

	return ret;
}

static void rockchip_gem_free_secure(struct rockchip_gem_object *rk_obj)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;

	drm_free_large(rk_obj->pages);
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	gen_pool_free(private->secure_buffer_pool, rk_obj->dma_handle,
		      rk_obj->base.size);
}

static inline bool is_vop_enabled(void)
{
	return (IS_ENABLED(CONFIG_ROCKCHIP_VOP) || IS_ENABLED(CONFIG_ROCKCHIP_VOP2));
}

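/*
 * rockchip_gem_alloc_buf - pick the backing store for the object (secure
 * pool, CMA or shmem pages) based on the BO flags and on whether an IOMMU
 * domain is available, then map it through the IOMMU when one is present.
 */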
static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;
	int ret = 0;

	if (!private->domain && is_vop_enabled())
		rk_obj->flags |= ROCKCHIP_BO_CONTIG;

	if (rk_obj->flags & ROCKCHIP_BO_SECURE) {
		rk_obj->buf_type = ROCKCHIP_GEM_BUF_TYPE_SECURE;
		rk_obj->flags |= ROCKCHIP_BO_CONTIG;
		if (alloc_kmap) {
			DRM_ERROR("Cannot allocate a secure buffer with kmap\n");
			return -EINVAL;
		}
		ret = rockchip_gem_alloc_secure(rk_obj);
		if (ret)
			return ret;
	} else if (rk_obj->flags & ROCKCHIP_BO_CONTIG) {
		rk_obj->buf_type = ROCKCHIP_GEM_BUF_TYPE_CMA;
		ret = rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
		if (ret)
			return ret;
	} else {
		rk_obj->buf_type = ROCKCHIP_GEM_BUF_TYPE_SHMEM;
		ret = rockchip_gem_get_pages(rk_obj);
		if (ret < 0)
			return ret;

		if (alloc_kmap) {
			rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages,
					      VM_MAP,
					      pgprot_writecombine(PAGE_KERNEL));
			if (!rk_obj->kvaddr) {
				DRM_ERROR("failed to vmap() buffer\n");
				ret = -ENOMEM;
				goto err_iommu_free;
			}
		}
	}

	if (private->domain) {
		ret = rockchip_gem_iommu_map(rk_obj);
		if (ret < 0)
			goto err_free;
	} else if (is_vop_enabled()) {
		WARN_ON(!rk_obj->dma_handle);
		rk_obj->dma_addr = rk_obj->dma_handle;
	}

	return 0;

err_iommu_free:
	if (private->domain)
		rockchip_gem_iommu_unmap(rk_obj);
err_free:
	if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_SECURE)
		rockchip_gem_free_secure(rk_obj);
	else if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_CMA)
		rockchip_gem_free_dma(rk_obj);
	else
		rockchip_gem_put_pages(rk_obj);
	return ret;
}

static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	drm_free_large(rk_obj->pages);
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr,
		       rk_obj->dma_handle, rk_obj->dma_attrs);
}

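/* Release whichever backing store rockchip_gem_alloc_buf() picked. */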
static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;

	if (private->domain)
		rockchip_gem_iommu_unmap(rk_obj);

	if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_SHMEM) {
		vunmap(rk_obj->kvaddr);
		rockchip_gem_put_pages(rk_obj);
	} else if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_SECURE) {
		rockchip_gem_free_secure(rk_obj);
	} else {
		rockchip_gem_free_dma(rk_obj);
	}
}

static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
					      struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	unsigned int count = obj->size >> PAGE_SHIFT;
	unsigned long user_count = vma_pages(vma);

	if (user_count == 0)
		return -ENXIO;

	return vm_map_pages(vma, rk_obj->pages, count);
}

static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
					    struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;

	return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
			      obj->size, rk_obj->dma_attrs);
}

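/*
 * Common mmap path: pick cached or (default) write-combined page protection
 * from the BO flags, then hand off to the shmem or DMA-API mapping helper.
 */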
static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
					struct vm_area_struct *vma)
{
	int ret;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	/* default is wc. */
	if (rk_obj->flags & ROCKCHIP_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	/*
	 * We allocated a struct page table for rk_obj, so clear
	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vma->vm_flags &= ~VM_PFNMAP;

	if (rk_obj->buf_type == ROCKCHIP_GEM_BUF_TYPE_SECURE) {
		DRM_ERROR("Disallow mmap for secure buffer\n");
		ret = -EINVAL;
	} else if (rk_obj->pages) {
		ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
	} else {
		ret = rockchip_drm_gem_object_mmap_dma(obj, vma);
	}

	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret)
		return ret;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

/* drm driver mmap file operations */
int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff = 0;

	obj = vma->vm_private_data;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
{
	drm_gem_object_release(&rk_obj->base);
	kfree(rk_obj);
}

static struct rockchip_gem_object *
rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size,
			  unsigned int flags)
{
	struct address_space *mapping;
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;

#ifdef CONFIG_ARM_LPAE
	gfp_t gfp_mask = GFP_HIGHUSER | __GFP_RECLAIMABLE | __GFP_DMA32;
#else
	gfp_t gfp_mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
#endif

	if (flags & ROCKCHIP_BO_DMA32)
		gfp_mask |= __GFP_DMA32;

	size = round_up(size, PAGE_SIZE);

	rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
	if (!rk_obj)
		return ERR_PTR(-ENOMEM);

	obj = &rk_obj->base;

	drm_gem_object_init(drm, obj, size);

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_set_gfp_mask(mapping, gfp_mask);

	return rk_obj;
}

struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
			   bool alloc_kmap, unsigned int flags)
{
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, size, flags);
	if (IS_ERR(rk_obj))
		return rk_obj;
	rk_obj->flags = flags;

	ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
	if (ret)
		goto err_free_rk_obj;

	return rk_obj;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

/*
 * rockchip_gem_destroy - destroy gem object
 *
 * The dma_buf_unmap_attachment and dma_buf_detach will be re-defined if
 * CONFIG_DMABUF_CACHE is enabled.
 *
 * Same as drm_prime_gem_destroy
 */
static void rockchip_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}

/*
 * rockchip_gem_free_object - (struct drm_driver)->gem_free_object_unlocked
 * callback function
 */
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (obj->import_attach) {
		if (private->domain) {
			rockchip_gem_iommu_unmap(rk_obj);
		} else {
			dma_unmap_sgtable(drm->dev, rk_obj->sgt,
					  DMA_BIDIRECTIONAL, 0);
		}
		drm_free_large(rk_obj->pages);
		rockchip_gem_destroy(obj, rk_obj->sgt);
	} else {
		rockchip_gem_free_buf(rk_obj);
	}

	rockchip_gem_release_object(rk_obj);
}

/*
 * rockchip_gem_create_with_handle - allocate an object with the given
 * size and create a gem handle on it
 *
 * returns a struct rockchip_gem_object* on success or ERR_PTR values
 * on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *drm, unsigned int size,
				unsigned int *handle, unsigned int flags)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	int ret;
	bool alloc_kmap = flags & ROCKCHIP_BO_ALLOC_KMAP ? true : false;

	rk_obj = rockchip_gem_create_object(drm, size, alloc_kmap, flags);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	obj = &rk_obj->base;

	/*
	 * Allocate an id in the idr table where the obj is registered,
	 * and the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(obj);

	return rk_obj;

err_handle_create:
	rockchip_gem_free_object(obj);

	return ERR_PTR(ret);
}

/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required. wrap
 * this into your own function if you need bigger alignment.
 */
int rockchip_gem_dumb_create(struct drm_file *file_priv,
			     struct drm_device *dev,
			     struct drm_mode_create_dumb *args)
{
	struct rockchip_gem_object *rk_obj;
	u32 min_pitch = args->width * DIV_ROUND_UP(args->bpp, 8);

	/*
	 * align to 64 bytes since Mali requires it.
	 */
	args->pitch = ALIGN(min_pitch, 64);
	args->size = args->pitch * args->height;

	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
						 &args->handle, args->flags);

	return PTR_ERR_OR_ZERO(rk_obj);
}

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 *       the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;
	struct sg_table *sgt;
	int ret;

	if (rk_obj->pages)
		return drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
				    rk_obj->dma_addr, obj->size,
				    rk_obj->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

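/*
 * Helpers for the dma-buf import path: attach an imported sg table to the
 * object either through the IOMMU domain or directly via the DMA API.
 */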
static int
rockchip_gem_iommu_map_sg(struct drm_device *drm,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sg,
			  struct rockchip_gem_object *rk_obj)
{
	rk_obj->sgt = sg;
	return rockchip_gem_iommu_map(rk_obj);
}

static int
rockchip_gem_dma_map_sg(struct drm_device *drm,
			struct dma_buf_attachment *attach,
			struct sg_table *sg,
			struct rockchip_gem_object *rk_obj)
{
	int err = dma_map_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
	if (err)
		return err;

	if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
		DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
		dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
		return -EINVAL;
	}

	rk_obj->dma_addr = sg_dma_address(sg->sgl);
	rk_obj->sgt = sg;
	return 0;
}

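/*
 * rockchip_gem_prime_import_sg_table - wrap an imported dma-buf sg table in
 * a rockchip GEM object, map it for the device and build a page array.
 */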
struct drm_gem_object *
rockchip_gem_prime_import_sg_table(struct drm_device *drm,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sg)
{
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size, 0);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	if (private->domain)
		ret = rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj);
	else
		ret = rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);

	if (ret < 0) {
		DRM_ERROR("failed to import sg table: %d\n", ret);
		goto err_free_rk_obj;
	}

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;
	rk_obj->pages = drm_calloc_large(rk_obj->num_pages, sizeof(*rk_obj->pages));
	if (!rk_obj->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		ret = -ENOMEM;
		goto err_free_rk_obj;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sg, rk_obj->pages, NULL, rk_obj->num_pages);
	if (ret < 0) {
		DRM_ERROR("invalid sgtable.\n");
		drm_free_large(rk_obj->pages);
		goto err_free_rk_obj;
	}

	return &rk_obj->base;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages)
		return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));

	if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return NULL;

	return rk_obj->kvaddr;
}

void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages) {
		vunmap(vaddr);
		return;
	}

	/* Nothing to do if allocated by DMA mapping API. */
}

int rockchip_gem_create_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_rockchip_gem_create *args = data;
	struct rockchip_gem_object *rk_obj;

	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
						 &args->handle, args->flags);
	return PTR_ERR_OR_ZERO(rk_obj);
}

int rockchip_gem_map_offset_ioctl(struct drm_device *drm, void *data,
				  struct drm_file *file_priv)
{
	struct drm_rockchip_gem_map_off *args = data;

	return drm_gem_dumb_map_offset(file_priv, drm, args->handle,
				       &args->offset);
}

int rockchip_gem_get_phys_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_rockchip_gem_phys *args = data;
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	int ret = 0;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}
	rk_obj = to_rockchip_obj(obj);

	if (!(rk_obj->flags & ROCKCHIP_BO_CONTIG)) {
		DRM_ERROR("Can't get phys address from non-contiguous buf.\n");
		ret = -EINVAL;
		goto out;
	}

	args->phy_addr = page_to_phys(rk_obj->pages[0]);

out:
	drm_gem_object_put(obj);

	return ret;
}

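/*
 * dma-buf begin/end_cpu_access hooks: bounce the sg table between CPU and
 * device ownership so cached CPU mappings stay coherent.
 */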
int rockchip_gem_prime_begin_cpu_access(struct drm_gem_object *obj,
					enum dma_data_direction dir)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;

	if (!rk_obj->sgt)
		return 0;

	dma_sync_sg_for_cpu(drm->dev, rk_obj->sgt->sgl,
			    rk_obj->sgt->nents, dir);
	return 0;
}

int rockchip_gem_prime_end_cpu_access(struct drm_gem_object *obj,
				      enum dma_data_direction dir)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;

	if (!rk_obj->sgt)
		return 0;

	dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl,
			       rk_obj->sgt->nents, dir);
	return 0;
}