xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
4*4882a593Smuzhiyun  * Author: Rob Clark <rob.clark@linaro.org>
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #include <linux/dma-buf.h>
8*4882a593Smuzhiyun #include <linux/highmem.h>
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <drm/drm_prime.h>
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include "omap_drv.h"
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun /* -----------------------------------------------------------------------------
15*4882a593Smuzhiyun  * DMABUF Export
16*4882a593Smuzhiyun  */
17*4882a593Smuzhiyun 
omap_gem_map_dma_buf(struct dma_buf_attachment * attachment,enum dma_data_direction dir)18*4882a593Smuzhiyun static struct sg_table *omap_gem_map_dma_buf(
19*4882a593Smuzhiyun 		struct dma_buf_attachment *attachment,
20*4882a593Smuzhiyun 		enum dma_data_direction dir)
21*4882a593Smuzhiyun {
22*4882a593Smuzhiyun 	struct drm_gem_object *obj = attachment->dmabuf->priv;
23*4882a593Smuzhiyun 	struct sg_table *sg;
24*4882a593Smuzhiyun 	dma_addr_t dma_addr;
25*4882a593Smuzhiyun 	int ret;
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun 	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
28*4882a593Smuzhiyun 	if (!sg)
29*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun 	/* camera, etc, need physically contiguous.. but we need a
32*4882a593Smuzhiyun 	 * better way to know this..
33*4882a593Smuzhiyun 	 */
34*4882a593Smuzhiyun 	ret = omap_gem_pin(obj, &dma_addr);
35*4882a593Smuzhiyun 	if (ret)
36*4882a593Smuzhiyun 		goto out;
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun 	ret = sg_alloc_table(sg, 1, GFP_KERNEL);
39*4882a593Smuzhiyun 	if (ret)
40*4882a593Smuzhiyun 		goto out;
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun 	sg_init_table(sg->sgl, 1);
43*4882a593Smuzhiyun 	sg_dma_len(sg->sgl) = obj->size;
44*4882a593Smuzhiyun 	sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(dma_addr)), obj->size, 0);
45*4882a593Smuzhiyun 	sg_dma_address(sg->sgl) = dma_addr;
46*4882a593Smuzhiyun 
47*4882a593Smuzhiyun 	/* this must be after omap_gem_pin() to ensure we have pages attached */
48*4882a593Smuzhiyun 	omap_gem_dma_sync_buffer(obj, dir);
49*4882a593Smuzhiyun 
50*4882a593Smuzhiyun 	return sg;
51*4882a593Smuzhiyun out:
52*4882a593Smuzhiyun 	kfree(sg);
53*4882a593Smuzhiyun 	return ERR_PTR(ret);
54*4882a593Smuzhiyun }
55*4882a593Smuzhiyun 
omap_gem_unmap_dma_buf(struct dma_buf_attachment * attachment,struct sg_table * sg,enum dma_data_direction dir)56*4882a593Smuzhiyun static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
57*4882a593Smuzhiyun 		struct sg_table *sg, enum dma_data_direction dir)
58*4882a593Smuzhiyun {
59*4882a593Smuzhiyun 	struct drm_gem_object *obj = attachment->dmabuf->priv;
60*4882a593Smuzhiyun 	omap_gem_unpin(obj);
61*4882a593Smuzhiyun 	sg_free_table(sg);
62*4882a593Smuzhiyun 	kfree(sg);
63*4882a593Smuzhiyun }
64*4882a593Smuzhiyun 
omap_gem_dmabuf_begin_cpu_access(struct dma_buf * buffer,enum dma_data_direction dir)65*4882a593Smuzhiyun static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
66*4882a593Smuzhiyun 		enum dma_data_direction dir)
67*4882a593Smuzhiyun {
68*4882a593Smuzhiyun 	struct drm_gem_object *obj = buffer->priv;
69*4882a593Smuzhiyun 	struct page **pages;
70*4882a593Smuzhiyun 	if (omap_gem_flags(obj) & OMAP_BO_TILED_MASK) {
71*4882a593Smuzhiyun 		/* TODO we would need to pin at least part of the buffer to
72*4882a593Smuzhiyun 		 * get de-tiled view.  For now just reject it.
73*4882a593Smuzhiyun 		 */
74*4882a593Smuzhiyun 		return -ENOMEM;
75*4882a593Smuzhiyun 	}
76*4882a593Smuzhiyun 	/* make sure we have the pages: */
77*4882a593Smuzhiyun 	return omap_gem_get_pages(obj, &pages, true);
78*4882a593Smuzhiyun }
79*4882a593Smuzhiyun 
omap_gem_dmabuf_end_cpu_access(struct dma_buf * buffer,enum dma_data_direction dir)80*4882a593Smuzhiyun static int omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
81*4882a593Smuzhiyun 					  enum dma_data_direction dir)
82*4882a593Smuzhiyun {
83*4882a593Smuzhiyun 	struct drm_gem_object *obj = buffer->priv;
84*4882a593Smuzhiyun 	omap_gem_put_pages(obj);
85*4882a593Smuzhiyun 	return 0;
86*4882a593Smuzhiyun }
87*4882a593Smuzhiyun 
omap_gem_dmabuf_mmap(struct dma_buf * buffer,struct vm_area_struct * vma)88*4882a593Smuzhiyun static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
89*4882a593Smuzhiyun 		struct vm_area_struct *vma)
90*4882a593Smuzhiyun {
91*4882a593Smuzhiyun 	struct drm_gem_object *obj = buffer->priv;
92*4882a593Smuzhiyun 	int ret = 0;
93*4882a593Smuzhiyun 
94*4882a593Smuzhiyun 	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
95*4882a593Smuzhiyun 	if (ret < 0)
96*4882a593Smuzhiyun 		return ret;
97*4882a593Smuzhiyun 
98*4882a593Smuzhiyun 	return omap_gem_mmap_obj(obj, vma);
99*4882a593Smuzhiyun }
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun static const struct dma_buf_ops omap_dmabuf_ops = {
102*4882a593Smuzhiyun 	.map_dma_buf = omap_gem_map_dma_buf,
103*4882a593Smuzhiyun 	.unmap_dma_buf = omap_gem_unmap_dma_buf,
104*4882a593Smuzhiyun 	.release = drm_gem_dmabuf_release,
105*4882a593Smuzhiyun 	.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
106*4882a593Smuzhiyun 	.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
107*4882a593Smuzhiyun 	.mmap = omap_gem_dmabuf_mmap,
108*4882a593Smuzhiyun };
109*4882a593Smuzhiyun 
/**
 * omap_gem_prime_export - export an omap GEM object as a dma-buf
 * @obj: GEM object to export
 * @flags: dma-buf export flags, passed through to the dma-buf core
 *
 * Returns the new dma_buf on success, or an ERR_PTR() on failure
 * (propagated from drm_gem_dmabuf_export()).
 */
struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &omap_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;	/* callbacks recover the GEM object from priv */

	return drm_gem_dmabuf_export(obj->dev, &exp_info);
}
121*4882a593Smuzhiyun 
122*4882a593Smuzhiyun /* -----------------------------------------------------------------------------
123*4882a593Smuzhiyun  * DMABUF Import
124*4882a593Smuzhiyun  */
125*4882a593Smuzhiyun 
/**
 * omap_gem_prime_import - import a dma-buf as an omap GEM object
 * @dev: DRM device importing the buffer
 * @dma_buf: buffer to import
 *
 * Self-imports (a dma-buf we exported ourselves on the same device) are
 * short-circuited by taking a reference on the original GEM object.
 * Otherwise the buffer is attached, mapped, and wrapped in a new GEM
 * object via omap_gem_new_dmabuf().
 *
 * Returns the GEM object on success, or an ERR_PTR() on failure.
 */
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct sg_table *sgt;
	int ret;

	if (dma_buf->ops == &omap_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases refcount on the gem itself instead of
			 * f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	/* Hold a reference on the dma-buf for the lifetime of the import. */
	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = omap_gem_new_dmabuf(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	/* Remember the attachment; GEM teardown unmaps/detaches from it. */
	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
176