// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022 Rockchip Electronics Co., Ltd
 * Based on videobuf2-dma-sg.c
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/rk-dma-heap.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/dma-map-ops.h>
#include <linux/cma.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-cma-sg.h>

struct vb2_cma_sg_buf {
	struct device			*dev;
	void				*vaddr;
	struct page			**pages;
	struct frame_vector		*vec;
	int				offset;
	unsigned long			dma_attrs;
	enum dma_data_direction		dma_dir;
	struct sg_table			sg_table;
	/*
	 * This will point to sg_table when used with the MMAP or USERPTR
	 * memory model, and to the dma_buf sglist when used with the
	 * DMABUF memory model.
	 */
	struct sg_table			*dma_sgt;
	size_t				size;
	unsigned int			num_pages;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;

	struct dma_buf_attachment	*db_attach;
};

static void vb2_cma_sg_put(void *buf_priv);

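/*
 * Allocate buf->size bytes as a set of order-0 pages, using the largest
 * page orders the allocator can satisfy: try the highest order that does
 * not overshoot the remaining size, fall back to smaller orders on
 * failure, and split each allocation into individual pages in buf->pages.
 */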
static int vb2_cma_sg_alloc_compacted(struct vb2_cma_sg_buf *buf,
				      gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	unsigned long size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over-allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}

static void vb2_cma_sg_free_compacted(struct vb2_cma_sg_buf *buf)
{
	int num_pages = buf->num_pages;

	while (num_pages--) {
		__free_page(buf->pages[num_pages]);
		buf->pages[num_pages] = NULL;
	}
}

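/*
 * Allocate buf->size bytes as one physically contiguous range: prefer the
 * Rockchip "rk-dma-heap-cma" heap when it is available and fall back to
 * the device's CMA area otherwise. buf->pages is filled with the
 * consecutive struct page pointers of that range.
 */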
static int vb2_cma_sg_alloc_contiguous(struct vb2_cma_sg_buf *buf)
{
	struct page *page = NULL;
	int i;
	bool cma_en = false;

	if (IS_ENABLED(CONFIG_CMA)) {
		struct rk_dma_heap *heap = rk_dma_heap_find("rk-dma-heap-cma");

		cma_en = true;
		if (heap)
			page = rk_dma_heap_alloc_contig_pages(heap, buf->size,
							      dev_name(buf->dev));
		else
			page = cma_alloc(dev_get_cma_area(buf->dev), buf->num_pages,
					 get_order(buf->size), GFP_KERNEL);
	}
	if (IS_ERR_OR_NULL(page)) {
		pr_err("%s: cma_en:%d failed to allocate pages\n", __func__, cma_en);
		return -ENOMEM;
	}
	for (i = 0; i < buf->num_pages; i++)
		buf->pages[i] = page + i;

	return 0;
}

static void vb2_cma_sg_free_contiguous(struct vb2_cma_sg_buf *buf)
{
	if (IS_ENABLED(CONFIG_CMA)) {
		struct rk_dma_heap *heap = rk_dma_heap_find("rk-dma-heap-cma");

		if (heap)
			rk_dma_heap_free_contig_pages(heap, buf->pages[0],
						      buf->size, dev_name(buf->dev));
		else
			cma_release(dev_get_cma_area(buf->dev),
				    buf->pages[0], buf->num_pages);
	}
}

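/*
 * MMAP memory model: allocate the backing pages (contiguous when
 * DMA_ATTR_FORCE_CONTIGUOUS is set, compacted otherwise), build an
 * sg_table over them and map it for DMA. The CPU cache sync is skipped
 * here and deferred to the prepare() memop.
 */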
static void *vb2_cma_sg_alloc(struct device *dev, unsigned long dma_attrs,
			      unsigned long size,
			      enum dma_data_direction dma_dir,
			      gfp_t gfp_flags)
{
	struct vb2_cma_sg_buf *buf;
	struct sg_table *sgt;
	int ret;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dma_attrs = dma_attrs;
	buf->dma_dir = dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;
	buf->dma_sgt = &buf->sg_table;
	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);

	buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
				    GFP_KERNEL | __GFP_ZERO);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	if (dma_attrs & DMA_ATTR_FORCE_CONTIGUOUS)
		ret = vb2_cma_sg_alloc_contiguous(buf);
	else
		ret = vb2_cma_sg_alloc_compacted(buf, gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC))
		goto fail_map;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_cma_sg_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	return buf;

fail_map:
	sg_free_table(buf->dma_sgt);
fail_table_alloc:
	if (dma_attrs & DMA_ATTR_FORCE_CONTIGUOUS)
		vb2_cma_sg_free_contiguous(buf);
	else
		vb2_cma_sg_free_compacted(buf);
fail_pages_alloc:
	kvfree(buf->pages);
fail_pages_array_alloc:
	put_device(buf->dev);
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

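/*
 * Drop one reference; on the last put, unmap the buffer from the device,
 * tear down any kernel mapping and the sg_table, free the pages and
 * release the device reference taken in vb2_cma_sg_alloc().
 */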
static void vb2_cma_sg_put(void *buf_priv)
{
	struct vb2_cma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;

	if (refcount_dec_and_test(&buf->refcount)) {
		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(buf->dma_sgt);
		if (buf->dma_attrs & DMA_ATTR_FORCE_CONTIGUOUS)
			vb2_cma_sg_free_contiguous(buf);
		else
			vb2_cma_sg_free_compacted(buf);
		kvfree(buf->pages);
		buf->pages = NULL;
		put_device(buf->dev);
		kfree(buf);
	}
}

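/*
 * Cache maintenance memops: prepare() hands the buffer over to the
 * device, finish() hands it back to the CPU.
 */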
static void vb2_cma_sg_prepare(void *buf_priv)
{
	struct vb2_cma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}

static void vb2_cma_sg_finish(void *buf_priv)
{
	struct vb2_cma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}

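/*
 * USERPTR memory model: pin the userspace pages behind vaddr with a frame
 * vector, build an sg_table over them (honouring the sub-page offset) and
 * map it for DMA, again deferring the CPU sync to prepare().
 */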
static void *vb2_cma_sg_get_userptr(struct device *dev, unsigned long vaddr,
				    unsigned long size,
				    enum dma_data_direction dma_dir)
{
	struct vb2_cma_sg_buf *buf;
	struct sg_table *sgt;
	struct frame_vector *vec;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dev = dev;
	buf->dma_dir = dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec))
		goto userptr_fail_pfnvec;
	buf->vec = vec;

	buf->pages = frame_vector_pages(vec);
	if (IS_ERR(buf->pages))
		goto userptr_fail_sgtable;
	buf->num_pages = frame_vector_count(vec);

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_sgtable;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC))
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_sgtable:
	vb2_destroy_framevec(vec);
userptr_fail_pfnvec:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_cma_sg_put_userptr(void *buf_priv)
{
	struct vb2_cma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	if (buf->dma_dir == DMA_FROM_DEVICE ||
	    buf->dma_dir == DMA_BIDIRECTIONAL)
		while (--i >= 0)
			set_page_dirty_lock(buf->pages[i]);
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

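/*
 * Return a kernel virtual address for the buffer, creating the mapping
 * lazily: dma_buf_vmap() for imported DMABUFs, vm_map_ram() otherwise.
 */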
static void *vb2_cma_sg_vaddr(void *buf_priv)
{
	struct vb2_cma_sg_buf *buf = buf_priv;

	WARN_ON(!buf);

	if (!buf->vaddr) {
		if (buf->db_attach)
			buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
		else
			buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
	}

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}

static unsigned int vb2_cma_sg_num_users(void *buf_priv)
{
	struct vb2_cma_sg_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

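/*
 * Map the buffer's pages into a userspace VMA and install the common vb2
 * vm_operations so that the mapping holds a reference on the buffer.
 */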
static int vb2_cma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_cma_sg_buf *buf = buf_priv;
	int err;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	err = vm_map_pages(vma, buf->pages, buf->num_pages);
	if (err) {
		pr_err("Remapping memory, error: %d\n", err);
		return err;
	}

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_cma_sg_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_cma_sg_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_cma_sg_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_cma_sg_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->dma_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->dma_sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_cma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_cma_sg_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

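/*
 * Map the attachment's private scatter list for the importing device. The
 * mapping is cached per attachment and only redone when the requested DMA
 * direction changes; dmabuf->lock serializes concurrent map/unmap calls.
 */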
static struct sg_table *vb2_cma_sg_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_cma_sg_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_cma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_cma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_cma_sg_get_dmabuf */
	vb2_cma_sg_put(dbuf->priv);
}

static int
vb2_cma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
				       enum dma_data_direction direction)
{
	struct vb2_cma_sg_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
	return 0;
}

static int
vb2_cma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
				     enum dma_data_direction direction)
{
	struct vb2_cma_sg_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
	return 0;
}

static void *vb2_cma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_cma_sg_buf *buf = dbuf->priv;

	return vb2_cma_sg_vaddr(buf);
}

static int vb2_cma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_cma_sg_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_cma_sg_dmabuf_ops = {
	.attach = vb2_cma_sg_dmabuf_ops_attach,
	.detach = vb2_cma_sg_dmabuf_ops_detach,
	.map_dma_buf = vb2_cma_sg_dmabuf_ops_map,
	.unmap_dma_buf = vb2_cma_sg_dmabuf_ops_unmap,
	.begin_cpu_access = vb2_cma_sg_dmabuf_ops_begin_cpu_access,
	.end_cpu_access = vb2_cma_sg_dmabuf_ops_end_cpu_access,
	.vmap = vb2_cma_sg_dmabuf_ops_vmap,
	.mmap = vb2_cma_sg_dmabuf_ops_mmap,
	.release = vb2_cma_sg_dmabuf_ops_release,
};

static struct dma_buf *vb2_cma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_cma_sg_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_cma_sg_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->dma_sgt))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_cma_sg_map_dmabuf(void *mem_priv)
{
	struct vb2_cma_sg_buf *buf = mem_priv;
	struct sg_table *sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_cma_sg_unmap_dmabuf(void *mem_priv)
{
	struct vb2_cma_sg_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_sgt = NULL;
}

static void vb2_cma_sg_detach_dmabuf(void *mem_priv)
{
	struct vb2_cma_sg_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_sgt))
		vb2_cma_sg_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

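/*
 * DMABUF memory model (importer side): attach the dmabuf to the consuming
 * device. Mapping of the scatter list is deferred to map_dmabuf().
 */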
static void *vb2_cma_sg_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_cma_sg_buf *buf;
	struct dma_buf_attachment *dba;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

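/*
 * The allocator "cookie" exposed to drivers is the buffer's sg_table.
 */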
static void *vb2_cma_sg_cookie(void *buf_priv)
{
	struct vb2_cma_sg_buf *buf = buf_priv;

	return buf->dma_sgt;
}

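/*
 * A minimal usage sketch (illustrative only; "q" names a driver's
 * struct vb2_queue and is not part of this file):
 *
 *	q->mem_ops = &vb2_cma_sg_memops;
 *	q->dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
 *
 * Setting DMA_ATTR_FORCE_CONTIGUOUS in the queue's dma_attrs makes
 * vb2_cma_sg_alloc() take the contiguous CMA path above; without it,
 * buffers are built from compacted high-order allocations.
 */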
const struct vb2_mem_ops vb2_cma_sg_memops = {
	.alloc		= vb2_cma_sg_alloc,
	.put		= vb2_cma_sg_put,
	.get_userptr	= vb2_cma_sg_get_userptr,
	.put_userptr	= vb2_cma_sg_put_userptr,
	.prepare	= vb2_cma_sg_prepare,
	.finish		= vb2_cma_sg_finish,
	.vaddr		= vb2_cma_sg_vaddr,
	.mmap		= vb2_cma_sg_mmap,
	.num_users	= vb2_cma_sg_num_users,
	.get_dmabuf	= vb2_cma_sg_get_dmabuf,
	.map_dmabuf	= vb2_cma_sg_map_dmabuf,
	.unmap_dmabuf	= vb2_cma_sg_unmap_dmabuf,
	.attach_dmabuf	= vb2_cma_sg_attach_dmabuf,
	.detach_dmabuf	= vb2_cma_sg_detach_dmabuf,
	.cookie		= vb2_cma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_cma_sg_memops);

MODULE_LICENSE("GPL");