// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022 Rockchip Electronics Co., Ltd
 * Based on videobuf2-dma-sg.c
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/rk-dma-heap.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/dma-map-ops.h>
#include <linux/cma.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-cma-sg.h>

struct vb2_cma_sg_buf {
	struct device			*dev;
	void				*vaddr;
	struct page			**pages;
	struct frame_vector		*vec;
	int				offset;
	unsigned long			dma_attrs;
	enum dma_data_direction		dma_dir;
	struct sg_table			sg_table;
	/*
	 * This will point to sg_table when used with the MMAP or USERPTR
	 * memory model, and to the dma_buf sglist when used with the
	 * DMABUF memory model.
	 */
	struct sg_table			*dma_sgt;
	size_t				size;
	unsigned int			num_pages;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;

	struct dma_buf_attachment	*db_attach;
};

static void vb2_cma_sg_put(void *buf_priv);

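/*
 * Allocate buf->num_pages pages for a non-contiguous buffer. Pages are
 * grabbed in the largest chunks the page allocator will hand out, dropping
 * to smaller orders on failure so the resulting scatterlist stays short.
 * If even an order-0 allocation fails, everything allocated so far is
 * released and -ENOMEM is returned.
 */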
static int vb2_cma_sg_alloc_compacted(struct vb2_cma_sg_buf *buf,
				      gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	unsigned long size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}

static void vb2_cma_sg_free_compacted(struct vb2_cma_sg_buf *buf)
{
	int num_pages = buf->num_pages;

	while (num_pages--) {
		__free_page(buf->pages[num_pages]);
		buf->pages[num_pages] = NULL;
	}
}

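/*
 * Allocate the whole buffer as one physically contiguous block (used when
 * DMA_ATTR_FORCE_CONTIGUOUS is set). The Rockchip "rk-dma-heap-cma" heap is
 * tried first; if it is not available the allocation falls back to the
 * device's default CMA area. buf->pages[] is then filled with the
 * consecutive page pointers so the rest of the code can treat contiguous
 * and compacted buffers the same way.
 */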
static int vb2_cma_sg_alloc_contiguous(struct vb2_cma_sg_buf *buf)
{
	struct rk_dma_heap *heap;
	struct page *page = NULL;
	int i;
	bool cma_en = false;

	if (IS_ENABLED(CONFIG_CMA)) {
		heap = rk_dma_heap_find("rk-dma-heap-cma");

		cma_en = true;
		if (heap)
			page = rk_dma_heap_alloc_contig_pages(heap, buf->size,
							      dev_name(buf->dev));
		else
			page = cma_alloc(dev_get_cma_area(buf->dev), buf->num_pages,
					 get_order(buf->size), GFP_KERNEL);
	}
	if (IS_ERR_OR_NULL(page)) {
		pr_err("%s: failed to allocate contiguous pages (cma_en=%d)\n",
		       __func__, cma_en);
		return -ENOMEM;
	}
	for (i = 0; i < buf->num_pages; i++)
		buf->pages[i] = page + i;

	return 0;
}

static void vb2_cma_sg_free_contiguous(struct vb2_cma_sg_buf *buf)
{
	if (IS_ENABLED(CONFIG_CMA)) {
		struct rk_dma_heap *heap = rk_dma_heap_find("rk-dma-heap-cma");

		if (heap)
			rk_dma_heap_free_contig_pages(heap, buf->pages[0],
						      buf->size, dev_name(buf->dev));
		else
			cma_release(dev_get_cma_area(buf->dev),
				    buf->pages[0], buf->num_pages);
	}
}

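/*
 * MMAP allocation callback: allocate the pages (contiguous or compacted,
 * depending on dma_attrs), build an sg_table covering them and map it for
 * the device. CPU cache syncs are deferred to the prepare()/finish() memops,
 * hence DMA_ATTR_SKIP_CPU_SYNC here.
 */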
static void *vb2_cma_sg_alloc(struct device *dev, unsigned long dma_attrs,
			      unsigned long size,
			      enum dma_data_direction dma_dir,
			      gfp_t gfp_flags)
{
	struct vb2_cma_sg_buf *buf;
	struct sg_table *sgt;
	int ret;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dma_attrs = dma_attrs;
	buf->dma_dir = dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;
	buf->dma_sgt = &buf->sg_table;
	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);

	buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
				    GFP_KERNEL | __GFP_ZERO);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	if (dma_attrs & DMA_ATTR_FORCE_CONTIGUOUS)
		ret = vb2_cma_sg_alloc_contiguous(buf);
	else
		ret = vb2_cma_sg_alloc_compacted(buf, gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC))
		goto fail_map;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_cma_sg_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	return buf;

fail_map:
	sg_free_table(buf->dma_sgt);
fail_table_alloc:
	if (dma_attrs & DMA_ATTR_FORCE_CONTIGUOUS)
		vb2_cma_sg_free_contiguous(buf);
	else
		vb2_cma_sg_free_compacted(buf);
fail_pages_alloc:
	kvfree(buf->pages);
fail_pages_array_alloc:
	put_device(buf->dev);
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

static void vb2_cma_sg_put(void *buf_priv)
{
	struct vb2_cma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;

	if (refcount_dec_and_test(&buf->refcount)) {
		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(buf->dma_sgt);
		if (buf->dma_attrs & DMA_ATTR_FORCE_CONTIGUOUS)
			vb2_cma_sg_free_contiguous(buf);
		else
			vb2_cma_sg_free_compacted(buf);
		kvfree(buf->pages);
		buf->pages = NULL;
		put_device(buf->dev);
		kfree(buf);
	}
}

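/*
 * prepare()/finish() do the cache maintenance that was skipped at map time:
 * sync the scatterlist towards the device before the hardware touches the
 * buffer, and back to the CPU once the hardware is done with it.
 */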
static void vb2_cma_sg_prepare(void *buf_priv)
{
	struct vb2_cma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}

static void vb2_cma_sg_finish(void *buf_priv)
{
	struct vb2_cma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}

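/*
 * USERPTR support: pin the user pages with a frame vector, build an
 * sg_table from them (keeping the sub-page offset of the user address)
 * and map it for the device. As with MMAP buffers, cache syncs are left
 * to prepare()/finish().
 */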
static void *vb2_cma_sg_get_userptr(struct device *dev, unsigned long vaddr,
				    unsigned long size,
				    enum dma_data_direction dma_dir)
{
	struct vb2_cma_sg_buf *buf;
	struct sg_table *sgt;
	struct frame_vector *vec;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dev = dev;
	buf->dma_dir = dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec))
		goto userptr_fail_pfnvec;
	buf->vec = vec;

	buf->pages = frame_vector_pages(vec);
	if (IS_ERR(buf->pages))
		goto userptr_fail_sgtable;
	buf->num_pages = frame_vector_count(vec);

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_sgtable;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC))
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_sgtable:
	vb2_destroy_framevec(vec);
userptr_fail_pfnvec:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_cma_sg_put_userptr(void *buf_priv)
{
	struct vb2_cma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	if (buf->dma_dir == DMA_FROM_DEVICE ||
	    buf->dma_dir == DMA_BIDIRECTIONAL)
		while (--i >= 0)
			set_page_dirty_lock(buf->pages[i]);
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_cma_sg_vaddr(void *buf_priv)
{
	struct vb2_cma_sg_buf *buf = buf_priv;

	WARN_ON(!buf);

	if (!buf->vaddr) {
		if (buf->db_attach)
			buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
		else
			buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
	}

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}

static unsigned int vb2_cma_sg_num_users(void *buf_priv)
{
	struct vb2_cma_sg_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static int vb2_cma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_cma_sg_buf *buf = buf_priv;
	int err;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	err = vm_map_pages(vma, buf->pages, buf->num_pages);
	if (err) {
		pr_err("Remapping memory, error: %d\n", err);
		return err;
	}

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_cma_sg_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_cma_sg_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_cma_sg_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_cma_sg_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->dma_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->dma_sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_cma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_cma_sg_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_cma_sg_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_cma_sg_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_cma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_cma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_cma_sg_get_dmabuf */
	vb2_cma_sg_put(dbuf->priv);
}

static int
vb2_cma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
				       enum dma_data_direction direction)
{
	struct vb2_cma_sg_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
	return 0;
}

static int
vb2_cma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
				     enum dma_data_direction direction)
{
	struct vb2_cma_sg_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
	return 0;
}

static void *vb2_cma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_cma_sg_buf *buf = dbuf->priv;

	return vb2_cma_sg_vaddr(buf);
}

static int vb2_cma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_cma_sg_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_cma_sg_dmabuf_ops = {
	.attach = vb2_cma_sg_dmabuf_ops_attach,
	.detach = vb2_cma_sg_dmabuf_ops_detach,
	.map_dma_buf = vb2_cma_sg_dmabuf_ops_map,
	.unmap_dma_buf = vb2_cma_sg_dmabuf_ops_unmap,
	.begin_cpu_access = vb2_cma_sg_dmabuf_ops_begin_cpu_access,
	.end_cpu_access = vb2_cma_sg_dmabuf_ops_end_cpu_access,
	.vmap = vb2_cma_sg_dmabuf_ops_vmap,
	.mmap = vb2_cma_sg_dmabuf_ops_mmap,
	.release = vb2_cma_sg_dmabuf_ops_release,
};

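/*
 * Export an MMAP buffer as a dma-buf. The dma-buf holds its own reference
 * on the vb2 buffer, dropped again from the dma-buf release op.
 */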
static struct dma_buf *vb2_cma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_cma_sg_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_cma_sg_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->dma_sgt))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_cma_sg_map_dmabuf(void *mem_priv)
{
	struct vb2_cma_sg_buf *buf = mem_priv;
	struct sg_table *sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_cma_sg_unmap_dmabuf(void *mem_priv)
{
	struct vb2_cma_sg_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_sgt = NULL;
}

static void vb2_cma_sg_detach_dmabuf(void *mem_priv)
{
	struct vb2_cma_sg_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_sgt))
		vb2_cma_sg_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

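/*
 * DMABUF import: only attach to the foreign dma-buf here; the scatterlist
 * is fetched later in map_dmabuf() when the buffer is actually used.
 */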
static void *vb2_cma_sg_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_cma_sg_buf *buf;
	struct dma_buf_attachment *dba;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

static void *vb2_cma_sg_cookie(void *buf_priv)
{
	struct vb2_cma_sg_buf *buf = buf_priv;

	return buf->dma_sgt;
}

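/*
 * Illustrative sketch only (not part of this file): a capture driver would
 * typically wire these memops into its vb2_queue roughly like
 *
 *	q->dev = &pdev->dev;
 *	q->mem_ops = &vb2_cma_sg_memops;
 *	// only if the hardware needs one physically contiguous block:
 *	q->dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
 *
 * where "q" and "pdev" stand for the driver's own vb2_queue and platform
 * device.
 */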
const struct vb2_mem_ops vb2_cma_sg_memops = {
	.alloc		= vb2_cma_sg_alloc,
	.put		= vb2_cma_sg_put,
	.get_userptr	= vb2_cma_sg_get_userptr,
	.put_userptr	= vb2_cma_sg_put_userptr,
	.prepare	= vb2_cma_sg_prepare,
	.finish		= vb2_cma_sg_finish,
	.vaddr		= vb2_cma_sg_vaddr,
	.mmap		= vb2_cma_sg_mmap,
	.num_users	= vb2_cma_sg_num_users,
	.get_dmabuf	= vb2_cma_sg_get_dmabuf,
	.map_dmabuf	= vb2_cma_sg_map_dmabuf,
	.unmap_dmabuf	= vb2_cma_sg_unmap_dmabuf,
	.attach_dmabuf	= vb2_cma_sg_attach_dmabuf,
	.detach_dmabuf	= vb2_cma_sg_detach_dmabuf,
	.cookie		= vb2_cma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_cma_sg_memops);

MODULE_LICENSE("GPL");