// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 *
 * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * Also utilizing parts of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 *
 * Copyright (C) 2021, 2022 Rockchip Electronics Co. Ltd.
 */

#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <uapi/linux/dma-heap.h>

struct cma_heap {
	struct dma_heap *heap;
	struct cma *cma;
};

struct cma_heap_buffer {
	struct cma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct page *cma_pages;
	struct page **pages;
	pgoff_t pagecount;
	int vmap_cnt;
	void *vaddr;

	bool uncached;
};

struct dma_heap_attachment {
	struct device *dev;
	struct sg_table table;
	struct list_head list;
	bool mapped;

	bool uncached;
};

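/*
 * Each attachment gets its own sg_table built from the buffer's page array;
 * the attachment is also tracked on the buffer so the CPU-access hooks can
 * sync every device that currently has it mapped.
 */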
static int cma_heap_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
					buffer->pagecount, 0,
					buffer->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret) {
		kfree(a);
		return ret;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	a->uncached = buffer->uncached;
	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void cma_heap_detach(struct dma_buf *dmabuf,
			    struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(&a->table);
	kfree(a);
}

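/*
 * Map the attachment's sg_table for DMA. Uncached buffers skip CPU cache
 * maintenance (DMA_ATTR_SKIP_CPU_SYNC), since their CPU mappings are
 * write-combined rather than cached.
 */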
static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = &a->table;
	int attrs = attachment->dma_map_attrs;
	int ret;

	if (a->uncached)
		attrs |= DMA_ATTR_SKIP_CPU_SYNC;

	ret = dma_map_sgtable(attachment->dev, table, direction, attrs);
	if (ret)
		return ERR_PTR(-ENOMEM);
	a->mapped = true;
	return table;
}

static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	int attrs = attachment->dma_map_attrs;

	a->mapped = false;

	if (a->uncached)
		attrs |= DMA_ATTR_SKIP_CPU_SYNC;

	dma_unmap_sgtable(attachment->dev, table, direction, attrs);
}

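/*
 * Partial CPU-access sync: only the [offset, offset + len) range of the
 * physically contiguous CMA allocation is synced, using the heap device.
 * Uncached buffers need no cache maintenance and return immediately.
 */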
static int __maybe_unused
cma_heap_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
					  enum dma_data_direction direction,
					  unsigned int offset,
					  unsigned int len)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	phys_addr_t phys = page_to_phys(buffer->cma_pages);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	if (buffer->uncached)
		return 0;

	mutex_lock(&buffer->lock);
	dma_sync_single_for_cpu(dma_heap_get_dev(buffer->heap->heap),
				phys + offset,
				len,
				direction);
	mutex_unlock(&buffer->lock);

	return 0;
}

static int __maybe_unused
cma_heap_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
					enum dma_data_direction direction,
					unsigned int offset,
					unsigned int len)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	phys_addr_t phys = page_to_phys(buffer->cma_pages);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	if (buffer->uncached)
		return 0;

	mutex_lock(&buffer->lock);
	dma_sync_single_for_device(dma_heap_get_dev(buffer->heap->heap),
				   phys + offset,
				   len,
				   direction);
	mutex_unlock(&buffer->lock);

	return 0;
}

static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					     enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					   enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

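/*
 * Userspace mmap is served by a fault handler that inserts pages on demand
 * from the buffer's page array; uncached buffers get a write-combined
 * vm_page_prot.
 */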
static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct cma_heap_buffer *buffer = vma->vm_private_data;

	if (vmf->pgoff >= buffer->pagecount)
		return VM_FAULT_SIGBUS;

	vmf->page = buffer->pages[vmf->pgoff];
	get_page(vmf->page);

	return 0;
}

static const struct vm_operations_struct dma_heap_vm_ops = {
	.fault = cma_heap_vm_fault,
};

static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	if (buffer->uncached)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	vma->vm_ops = &dma_heap_vm_ops;
	vma->vm_private_data = buffer;

	return 0;
}

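/*
 * Kernel mapping of the whole buffer; write-combined when the buffer is
 * uncached. vmap/vunmap below refcount the mapping under buffer->lock.
 */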
static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
{
	void *vaddr;
	pgprot_t pgprot = PAGE_KERNEL;

	if (buffer->uncached)
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, pgprot);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static void *cma_heap_vmap(struct dma_buf *dmabuf)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		vaddr = buffer->vaddr;
		goto out;
	}

	vaddr = cma_heap_do_vmap(buffer);
	if (IS_ERR(vaddr))
		goto out;

	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
out:
	mutex_unlock(&buffer->lock);

	return vaddr;
}

static void cma_heap_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
}

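/*
 * Final release: drop any leftover kernel mapping, free the page array and
 * return the contiguous allocation to the CMA area.
 */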
static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct cma_heap *cma_heap = buffer->heap;

	if (buffer->vmap_cnt > 0) {
		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
		vunmap(buffer->vaddr);
	}

	/* free page list */
	kfree(buffer->pages);
	/* release memory */
	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
	kfree(buffer);
}

static const struct dma_buf_ops cma_heap_buf_ops = {
	.attach = cma_heap_attach,
	.detach = cma_heap_detach,
	.map_dma_buf = cma_heap_map_dma_buf,
	.unmap_dma_buf = cma_heap_unmap_dma_buf,
	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
#ifdef CONFIG_DMABUF_PARTIAL
	.begin_cpu_access_partial = cma_heap_dma_buf_begin_cpu_access_partial,
	.end_cpu_access_partial = cma_heap_dma_buf_end_cpu_access_partial,
#endif
	.mmap = cma_heap_mmap,
	.vmap = cma_heap_vmap,
	.vunmap = cma_heap_vunmap,
	.release = cma_heap_dma_buf_release,
};

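/*
 * Allocate a physically contiguous buffer from the heap's CMA area, zero it,
 * build the page array used for attach/mmap/vmap and export it as a dma-buf.
 * @uncached selects the write-combined variant of the heap.
 */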
static struct dma_buf *cma_heap_do_allocate(struct dma_heap *heap,
					 unsigned long len,
					 unsigned long fd_flags,
					 unsigned long heap_flags, bool uncached)
{
	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
	struct cma_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	size_t size = PAGE_ALIGN(len);
	pgoff_t pagecount = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	struct page *cma_pages;
	struct dma_buf *dmabuf;
	int ret = -ENOMEM;
	pgoff_t pg;
	dma_addr_t dma;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->uncached = uncached;

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->len = size;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, GFP_KERNEL);
	if (!cma_pages)
		goto free_buffer;

	/* Clear the cma pages */
	if (PageHighMem(cma_pages)) {
		unsigned long nr_clear_pages = pagecount;
		struct page *page = cma_pages;

		while (nr_clear_pages > 0) {
			void *vaddr = kmap_atomic(page);

			memset(vaddr, 0, PAGE_SIZE);
			kunmap_atomic(vaddr);
			/*
			 * Avoid wasting time zeroing memory if the process
			 * has been killed by SIGKILL
			 */
			if (fatal_signal_pending(current))
				goto free_cma;
			page++;
			nr_clear_pages--;
		}
	} else {
		memset(page_address(cma_pages), 0, size);
	}

	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
	if (!buffer->pages) {
		ret = -ENOMEM;
		goto free_cma;
	}

	for (pg = 0; pg < pagecount; pg++)
		buffer->pages[pg] = &cma_pages[pg];

	buffer->cma_pages = cma_pages;
	buffer->heap = cma_heap;
	buffer->pagecount = pagecount;

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &cma_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}

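	/*
	 * Dummy map/unmap on the heap device: this performs CPU cache
	 * maintenance over the freshly zeroed range before the buffer is
	 * only accessed through uncached (write-combined) mappings.
	 */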
	if (buffer->uncached) {
		dma = dma_map_page(dma_heap_get_dev(heap), buffer->cma_pages, 0,
			     buffer->pagecount * PAGE_SIZE, DMA_FROM_DEVICE);
		dma_unmap_page(dma_heap_get_dev(heap), dma,
			       buffer->pagecount * PAGE_SIZE, DMA_FROM_DEVICE);
	}

	return dmabuf;

free_pages:
	kfree(buffer->pages);
free_cma:
	cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
	kfree(buffer);

	return ERR_PTR(ret);
}

static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
					 unsigned long len,
					 unsigned long fd_flags,
					 unsigned long heap_flags)
{
	return cma_heap_do_allocate(heap, len, fd_flags, heap_flags, false);
}

#if IS_ENABLED(CONFIG_NO_GKI)
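/*
 * Non-GKI helper: resolve a dma-buf fd exported by this heap to the physical
 * address of its contiguous CMA backing.
 */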
static int cma_heap_get_phys(struct dma_heap *heap,
			     struct dma_heap_phys_data *phys)
{
	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
	struct cma_heap_buffer *buffer;
	struct dma_buf *dmabuf;

	if (IS_ERR_OR_NULL(phys))
		return -EINVAL;

	phys->paddr = (__u64)-1;

	dmabuf = dma_buf_get(phys->fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return -EBADFD;

	buffer = dmabuf->priv;
	if (IS_ERR_OR_NULL(buffer))
		goto err;

	if (buffer->heap != cma_heap)
		goto err;

	phys->paddr = page_to_phys(buffer->cma_pages);

err:
	dma_buf_put(dmabuf);

	return (phys->paddr == (__u64)-1) ? -EINVAL : 0;
}
#endif

static const struct dma_heap_ops cma_heap_ops = {
	.allocate = cma_heap_allocate,
#if IS_ENABLED(CONFIG_NO_GKI)
	.get_phys = cma_heap_get_phys,
#endif
};

static struct dma_buf *cma_uncached_heap_allocate(struct dma_heap *heap,
					 unsigned long len,
					 unsigned long fd_flags,
					 unsigned long heap_flags)
{
	return cma_heap_do_allocate(heap, len, fd_flags, heap_flags, true);
}

static struct dma_buf *cma_uncached_heap_not_initialized(struct dma_heap *heap,
					 unsigned long len,
					 unsigned long fd_flags,
					 unsigned long heap_flags)
{
	pr_info("heap %s not initialized\n", dma_heap_get_name(heap));
	return ERR_PTR(-EBUSY);
}

static struct dma_heap_ops cma_uncached_heap_ops = {
	.allocate = cma_uncached_heap_not_initialized,
};

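/*
 * Give the heap's device a 64-bit DMA mask and a maximum segment size so it
 * can be used for the cache-maintenance mappings above.
 */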
static int set_heap_dev_dma(struct device *heap_dev)
{
	int err = 0;

	if (!heap_dev)
		return -EINVAL;

	dma_coerce_mask_and_coherent(heap_dev, DMA_BIT_MASK(64));

	if (!heap_dev->dma_parms) {
		heap_dev->dma_parms = devm_kzalloc(heap_dev,
						   sizeof(*heap_dev->dma_parms),
						   GFP_KERNEL);
		if (!heap_dev->dma_parms)
			return -ENOMEM;

		err = dma_set_max_seg_size(heap_dev, (unsigned int)DMA_BIT_MASK(64));
		if (err) {
			devm_kfree(heap_dev, heap_dev->dma_parms);
			dev_err(heap_dev, "Failed to set DMA segment size, err:%d\n", err);
			return err;
		}
	}

	return 0;
}

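/*
 * Register two heaps on the given CMA area: "cma" (cached) and
 * "cma-uncached". The uncached heap only starts allocating once its device's
 * DMA mask is configured, hence the late swap of the allocate op.
 */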
static int __add_cma_heap(struct cma *cma, void *data)
{
	struct cma_heap *cma_heap, *cma_uncached_heap;
	struct dma_heap_export_info exp_info;
	int ret;

	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
	if (!cma_heap)
		return -ENOMEM;
	cma_heap->cma = cma;

	exp_info.name = "cma";
	exp_info.ops = &cma_heap_ops;
	exp_info.priv = cma_heap;

	cma_heap->heap = dma_heap_add(&exp_info);
	if (IS_ERR(cma_heap->heap)) {
		ret = PTR_ERR(cma_heap->heap);
		goto free_cma_heap;
	}

	cma_uncached_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
	if (!cma_uncached_heap) {
		ret = -ENOMEM;
		goto put_cma_heap;
	}

	cma_uncached_heap->cma = cma;

	exp_info.name = "cma-uncached";
	exp_info.ops = &cma_uncached_heap_ops;
	exp_info.priv = cma_uncached_heap;

	cma_uncached_heap->heap = dma_heap_add(&exp_info);
	if (IS_ERR(cma_uncached_heap->heap)) {
		ret = PTR_ERR(cma_uncached_heap->heap);
		goto free_uncached_cma_heap;
	}

	ret = set_heap_dev_dma(dma_heap_get_dev(cma_uncached_heap->heap));
	if (ret)
		goto put_uncached_cma_heap;

	mb(); /* make sure we only set allocate after dma_mask is set */
	cma_uncached_heap_ops.allocate = cma_uncached_heap_allocate;

	return 0;

put_uncached_cma_heap:
	dma_heap_put(cma_uncached_heap->heap);
free_uncached_cma_heap:
	kfree(cma_uncached_heap);
put_cma_heap:
	dma_heap_put(cma_heap->heap);
free_cma_heap:
	kfree(cma_heap);

	return ret;
}

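/*
 * Only the default CMA area is exposed as dma-heaps here; per-device CMA
 * regions are not registered.
 */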
static int add_default_cma_heap(void)
{
	struct cma *default_cma = dev_get_cma_area(NULL);
	int ret = 0;

	if (default_cma)
		ret = __add_cma_heap(default_cma, NULL);

	return ret;
}
module_init(add_default_cma_heap);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");
MODULE_LICENSE("GPL");