// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/gpu/drm/armada/armada_gem.c
 *
 * Copyright (C) 2012 Russell King
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>

#include <drm/armada_drm.h>
#include <drm/drm_prime.h>

#include "armada_drm.h"
#include "armada_gem.h"
#include "armada_ioctlP.h"

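/*
 * Fault handler for directly-mapped GEM objects: these carry their
 * backing in a single physically contiguous region starting at
 * obj->phys_addr, so the faulting PFN is the base PFN plus the page
 * offset of the fault address within the VMA.
 */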
static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
{
	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
	struct armada_gem_object *obj = drm_to_armada_gem(gobj);
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;

	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}

const struct vm_operations_struct armada_gem_vm_ops = {
	.fault	= armada_gem_vm_fault,
	.open	= drm_gem_vm_open,
	.close	= drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}

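/*
 * Final unreference: release the mmap offset, give back whichever
 * backing store the object owns (system pages or a node in the linear
 * pool), and unmap/detach any imported dma-buf before freeing the
 * object itself.
 */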
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct armada_private *priv = drm_to_armada_dev(obj->dev);

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	might_lock(&priv->linear_lock);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);
		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		mutex_lock(&priv->linear_lock);
		drm_mm_remove_node(dobj->linear);
		mutex_unlock(&priv->linear_lock);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment(dobj->obj.import_attach,
						 dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}

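/*
 * Attach backing store suitable for scanout to a GEM object: small
 * objects are served from the page allocator, everything else from
 * the driver's linear memory pool.
 */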
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = drm_to_armada_dev(dev);
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handling
	 * here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from CMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * CMA's interface uses dma_alloc_coherent(), which provides us
	 * with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address may be invalid, depending on the
	 * architecture implementation.
	 *
	 * The device address may also not be a physical address; there
	 * may be some kind of remapping between the device and system
	 * RAM, which makes it unsafe to reuse the device address as a
	 * physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOSPC;

		mutex_lock(&priv->linear_lock);
		ret = drm_mm_insert_node_generic(&priv->linear, node,
						 size, align, 0, 0);
		mutex_unlock(&priv->linear_lock);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&priv->linear_lock);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&priv->linear_lock);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
		obj->mapped = true;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}

void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}

struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	drm_gem_private_object_init(dev, &obj->obj, size);

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}

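/*
 * Allocate a shmem-backed GEM object.  The GFP mask on the backing
 * mapping allows highmem pages and marks them reclaimable; the pages
 * themselves are only pinned later, when the object is mapped for DMA.
 */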
static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	mapping = obj->obj.filp->f_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}

/* Dumb alloc support */
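/*
 * Reached from userspace via DRM_IOCTL_MODE_CREATE_DUMB.  A minimal,
 * illustrative call sequence (untested sketch):
 *
 *	struct drm_mode_create_dumb arg = {
 *		.width = 64, .height = 32, .bpp = 32,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &arg);
 *	// on success, arg.handle, arg.pitch and arg.size are filled in
 */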
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put(&dobj->obj);
	return ret;
}

/* Private driver gem ioctls */
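/*
 * DRM_IOCTL_ARMADA_GEM_CREATE allocates a shmem-backed object and hands
 * back a GEM handle.  Illustrative use (untested sketch, assuming the
 * UAPI declared in include/uapi/drm/armada_drm.h):
 *
 *	struct drm_armada_gem_create arg = { .size = length };
 *
 *	ioctl(fd, DRM_IOCTL_ARMADA_GEM_CREATE, &arg);
 *	// arg.handle now names the object; map it into the process
 *	// with DRM_IOCTL_ARMADA_GEM_MMAP
 */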
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put(&dobj->obj);
	return ret;
}

/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_put(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_put(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}

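/*
 * Copy user data into a kernel-mapped object
 * (DRM_IOCTL_ARMADA_GEM_PWRITE) and, if the object has registered an
 * update hook (e.g. for cursor contents), notify it that the data
 * changed.
 */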
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(ptr, args->size))
		return -EFAULT;

	ret = fault_in_pages_readable(ptr, args->size);
	if (ret)
		return ret;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object */
	if (!dobj->addr) {
		ret = -EINVAL;
		goto unref;
	}

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_put(&dobj->obj);
	return ret;
}

/* Prime support */
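/*
 * Exporting: build an sg_table for whichever backing the object has.
 * Three cases are handled below: shmem pages (pinned one by one),
 * a single contiguous page allocation, and a node in the linear pool
 * (which has no struct pages, so only the DMA address is filled in).
 */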
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = dobj->obj.filp->f_mapping;

		for_each_sgtable_sg(sgt, sg, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				goto release;

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sgtable(attach->dev, sgt, dir, 0))
			goto release;
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sgtable(attach->dev, sgt, dir, 0))
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;
		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sgtable_sg(sgt, sg, i)
		if (sg_page(sg))
			put_page(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}

static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);

	if (dobj->obj.filp) {
		struct scatterlist *sg;

		for_each_sgtable_sg(sgt, sg, i)
			put_page(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}

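/* mmap of buffers we export is not supported */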
static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf	= armada_gem_prime_map_dma_buf,
	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.mmap		= armada_gem_dmabuf_mmap,
};

struct dma_buf *
armada_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(obj->dev, &exp_info);
}

struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}

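/*
 * Map an imported dma-buf for DMA.  The display hardware can only scan
 * out of a single contiguous region, so a scatterlist with more than
 * one entry, or one smaller than the object, is rejected.
 */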
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
					   DMA_TO_DEVICE);
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}
	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	dobj->mapped = true;
	return 0;
}