// SPDX-License-Identifier: GPL-2.0-or-later
/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 */

#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <linux/shmem_fs.h>

#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"

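/*
 * exynos_drm_alloc_buf - allocate backing storage for a GEM object
 * @exynos_gem: GEM object to back
 * @kvmap: whether to keep a kernel virtual mapping of the buffer
 *
 * Allocates the buffer through the DMA API, translating the object's
 * EXYNOS_BO_* flags into DMA attributes. Returns 0 on success or a
 * negative error code.
 */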
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap)
{
	struct drm_device *dev = exynos_gem->base.dev;
	unsigned long attr = 0;

	if (exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
		return 0;
	}

	/*
	 * If EXYNOS_BO_CONTIG is set, a fully physically contiguous
	 * memory region is allocated; otherwise the allocation is made
	 * as physically contiguous as possible.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		attr |= DMA_ATTR_FORCE_CONTIGUOUS;

	/*
	 * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE is set, use a
	 * write-combined mapping; otherwise use a cacheable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
			!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr |= DMA_ATTR_WRITE_COMBINE;

	/* fbdev emulation requires a kernel mapping */
	if (!kvmap)
		attr |= DMA_ATTR_NO_KERNEL_MAPPING;

	exynos_gem->dma_attrs = attr;
	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
		return -ENOMEM;
	}

	if (kvmap)
		exynos_gem->kvaddr = exynos_gem->cookie;

	DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
	return 0;
}

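/*
 * exynos_drm_free_buf - release the DMA buffer backing a GEM object
 * @exynos_gem: GEM object whose storage should be freed
 *
 * Counterpart to exynos_drm_alloc_buf(); a no-op if no buffer was
 * allocated.
 */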
static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr is invalid.\n");
		return;
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
			(dma_addr_t)exynos_gem->dma_addr,
			exynos_gem->dma_attrs);
}

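/*
 * exynos_drm_gem_handle_create - expose a GEM object to userspace
 * @obj: GEM object to register
 * @file_priv: DRM file the handle belongs to
 * @handle: returns the userspace-visible handle
 *
 * On success the handle owns the reference taken at allocation time,
 * so the caller's reference is dropped here.
 */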
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle returned to userspace carries that id.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(obj);

	return 0;
}

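/*
 * exynos_drm_gem_destroy - final teardown of a GEM object
 * @exynos_gem: object to destroy
 *
 * Frees locally allocated storage, or detaches from the dma-buf for
 * imported objects, then releases the base GEM object itself.
 */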
void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "handle count = %d\n",
			  obj->handle_count);

	/*
	 * Do not release the memory region of an imported buffer here;
	 * the exporter will release it once the dmabuf's refcount
	 * reaches zero.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}

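/*
 * exynos_drm_gem_init - allocate and initialize a bare exynos GEM object
 * @dev: DRM device
 * @size: buffer size in bytes
 *
 * Sets up the base GEM object and its mmap offset, but does not
 * allocate any backing storage.
 */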
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %pK\n", obj->filp);

	return exynos_gem;
}

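/*
 * exynos_drm_gem_create - allocate a fully backed exynos GEM object
 * @dev: DRM device
 * @flags: EXYNOS_BO_* memory type and cache attribute flags
 * @size: requested buffer size, rounded up to page granularity
 * @kvmap: whether a kernel virtual mapping is required
 *
 * Validates the request, falls back to contiguous allocation when no
 * IOMMU is available, and allocates the DMA buffer.
 */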
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size,
					     bool kvmap)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_DEV_ERROR(dev->dev,
			      "invalid GEM buffer flags: %u\n", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_DEV_ERROR(dev->dev, "invalid GEM buffer size: %lu\n", size);
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
		/*
		 * When no IOMMU is available, all allocated buffers are
		 * contiguous anyway, so drop the EXYNOS_BO_NONCONTIG flag.
		 */
		flags &= ~EXYNOS_BO_NONCONTIG;
		DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
	}

	/* record the memory type and cache attributes requested by userspace. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem, kvmap);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}

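/*
 * exynos_drm_gem_create_ioctl - DRM_IOCTL_EXYNOS_GEM_CREATE handler
 *
 * Allocates a buffer with the userspace-supplied flags and size and
 * returns a handle to it in @args->handle.
 */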
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size, false);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_exynos_gem_map *args = data;

	return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
				       &args->offset);
}

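/*
 * exynos_drm_gem_get - look up an exynos GEM object by handle
 *
 * Returns NULL if the handle is invalid. On success the object's
 * reference count is raised; the caller must drop it with
 * drm_gem_object_put() when done.
 */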
struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
					  unsigned int gem_handle)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj)
		return NULL;
	return to_exynos_gem(obj);
}

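/*
 * exynos_drm_gem_mmap_buffer - map the DMA buffer into a userspace VMA
 *
 * Resets vm_pgoff, which still carries the fake GEM mmap offset, so
 * that dma_mmap_attrs() maps the buffer from its start.
 */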
static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

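/*
 * exynos_drm_gem_get_ioctl - DRM_IOCTL_EXYNOS_GEM_GET handler
 *
 * Reports the flags and size of the buffer behind @args->handle back
 * to userspace.
 */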
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		DRM_DEV_ERROR(dev->dev, "failed to lookup gem object.\n");
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_put(obj);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}

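/*
 * exynos_drm_gem_dumb_create - DRM_IOCTL_MODE_CREATE_DUMB handler
 *
 * Allocates a write-combined buffer suitable for scanout:
 * non-contiguous when an IOMMU is available, contiguous otherwise.
 */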
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * Allocate memory to be used as a framebuffer.
	 * This callback is invoked by userspace through the
	 * DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size, false);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

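/*
 * exynos_drm_gem_mmap_obj - apply cache attributes and map a buffer
 *
 * Derives the page protection from the object's EXYNOS_BO_* flags
 * before handing the VMA to exynos_drm_gem_mmap_buffer().
 */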
static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
			  exynos_gem->flags);

	/* non-cacheable mapping by default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}

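/*
 * exynos_drm_gem_mmap - file_operations .mmap entry point
 *
 * Imported dma-bufs are forwarded to the exporter via dma_buf_mmap();
 * locally allocated buffers are mapped directly.
 */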
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;

	if (obj->import_attach)
		return dma_buf_mmap(obj->dma_buf, vma, 0);

	return exynos_drm_gem_mmap_obj(obj, vma);
}

/* low-level interface prime helpers */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
}

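/*
 * exynos_drm_gem_prime_get_sg_table - export the buffer as a scatterlist
 *
 * Builds a new sg_table describing the DMA buffer so it can be handed
 * to a dma-buf importer. The caller takes ownership of the returned
 * table.
 */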
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	struct drm_device *drm_dev = obj->dev;
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(to_dma_dev(drm_dev), sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    exynos_gem->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to get sgtable, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

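/*
 * exynos_drm_gem_prime_import_sg_table - wrap an imported dma-buf
 *
 * The driver tracks each buffer with a single DMA address, so the
 * import is rejected unless the scatterlist maps contiguously in DMA
 * address space.
 */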
struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;

	/* check if the entries in the sg_table are contiguous */
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) {
		DRM_ERROR("buffer chunks must be mapped contiguously");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem))
		return ERR_CAST(exynos_gem);

	/*
	 * The buffer has been mapped contiguously into the DMA address
	 * space, but with an IOMMU present it may be either CONTIG or
	 * NONCONTIG in physical memory. We assume a simplified logic
	 * below:
	 */
	if (is_drm_iommu_supported(dev))
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	else
		exynos_gem->flags |= EXYNOS_BO_CONTIG;

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
	exynos_gem->sgt = sgt;
	return &exynos_gem->base;
}

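/*
 * The prime vmap/vunmap callbacks are stubs, presumably because
 * buffers are normally allocated with DMA_ATTR_NO_KERNEL_MAPPING and
 * so no kernel virtual address is available to hand out.
 */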
void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}

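/*
 * exynos_drm_gem_prime_mmap - mmap callback for PRIME file descriptors
 *
 * Sets up the VMA through the GEM core before applying the
 * driver-specific mapping in exynos_drm_gem_mmap_obj().
 */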
int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
			      struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	return exynos_drm_gem_mmap_obj(obj, vma);
}