// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 *
 * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * Also utilizing parts of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 *
 * Copyright (C) 2022 Rockchip Electronics Co. Ltd.
 * Author: Simon Xue <xxm@rock-chips.com>
 */

#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <uapi/linux/rk-dma-heap.h>
#include <linux/proc_fs.h>
#include "../../../mm/cma.h"
#include "rk-dma-heap.h"

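/*
 * rk_cma_heap binds one rk_dma_heap instance to the CMA area backing it.
 * rk_cma_heap_buffer describes a single allocation: the contiguous CMA
 * pages, a page pointer array used for vmap(), a refcounted kernel
 * mapping, and the physical base used for mmap and cache maintenance.
 * rk_cma_heap_attachment holds per-device attachment state.
 */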
struct rk_cma_heap {
	struct rk_dma_heap *heap;
	struct cma *cma;
};

struct rk_cma_heap_buffer {
	struct rk_cma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct page *cma_pages;
	struct page **pages;
	pgoff_t pagecount;
	int vmap_cnt;
	void *vaddr;
	phys_addr_t phys;
	bool attached;
};

struct rk_cma_heap_attachment {
	struct device *dev;
	struct sg_table table;
	struct list_head list;
	bool mapped;
};

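/*
 * The allocation is physically contiguous, so the per-attachment sg_table
 * needs only a single entry covering the whole buffer.
 */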
static int rk_cma_heap_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;
	struct rk_cma_heap_attachment *a;
	struct sg_table *table;
	size_t size = buffer->pagecount << PAGE_SHIFT;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = &a->table;

	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(a);
		return ret;
	}
	sg_set_page(table->sgl, buffer->cma_pages, PAGE_ALIGN(size), 0);

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	buffer->attached = true;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void rk_cma_heap_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;
	struct rk_cma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	buffer->attached = false;

	sg_free_table(&a->table);
	kfree(a);
}

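/*
 * Map the attachment's sg_table for the attached device, honoring any DMA
 * attributes (e.g. DMA_ATTR_SKIP_CPU_SYNC) set on the attachment.
 */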
static struct sg_table *rk_cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction direction)
{
	struct rk_cma_heap_attachment *a = attachment->priv;
	struct sg_table *table = &a->table;
	int attrs = attachment->dma_map_attrs;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, attrs);
	if (ret)
		return ERR_PTR(-ENOMEM);
	a->mapped = true;
	return table;
}

static void rk_cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *table,
				      enum dma_data_direction direction)
{
	struct rk_cma_heap_attachment *a = attachment->priv;
	int attrs = attachment->dma_map_attrs;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, attrs);
}

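/*
 * CPU access: sync every mapped attachment, and additionally sync the
 * requested range through the heap's own device when no device is
 * currently attached (covers buffers only mapped by userspace).
 */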
static int
rk_cma_heap_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
					     enum dma_data_direction direction,
					     unsigned int offset,
					     unsigned int len)
{
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;
	struct rk_cma_heap_attachment *a;

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
	}

	/* For userspace mappings where no device has attached yet */
	if (buffer->phys && !buffer->attached)
		dma_sync_single_for_cpu(rk_dma_heap_get_dev(buffer->heap->heap),
					buffer->phys + offset,
					len,
					direction);
	mutex_unlock(&buffer->lock);

	return 0;
}

static int
rk_cma_heap_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
					   enum dma_data_direction direction,
					   unsigned int offset,
					   unsigned int len)
{
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;
	struct rk_cma_heap_attachment *a;

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
	}

	/* For userspace mappings where no device has attached yet */
	if (buffer->phys && !buffer->attached)
		dma_sync_single_for_device(rk_dma_heap_get_dev(buffer->heap->heap),
					   buffer->phys + offset,
					   len,
					   direction);
	mutex_unlock(&buffer->lock);

	return 0;
}

static int rk_cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
						enum dma_data_direction dir)
{
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;
	unsigned int len = buffer->pagecount * PAGE_SIZE;

	return rk_cma_heap_dma_buf_begin_cpu_access_partial(dmabuf, dir, 0, len);
}

static int rk_cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					      enum dma_data_direction dir)
{
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;
	unsigned int len = buffer->pagecount * PAGE_SIZE;

	return rk_cma_heap_dma_buf_end_cpu_access_partial(dmabuf, dir, 0, len);
}

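/*
 * The buffer is one contiguous block, so the whole VMA can be remapped
 * with a single remap_pfn_range() call starting at buffer->phys.
 */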
static int rk_cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;
	size_t size = vma->vm_end - vma->vm_start;
	int ret;

	ret = remap_pfn_range(vma, vma->vm_start, __phys_to_pfn(buffer->phys),
			      size, vma->vm_page_prot);
	if (ret)
		return -EAGAIN;

	return 0;
}

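/*
 * Kernel mappings are created with vmap() over the page array and are
 * refcounted via vmap_cnt so nested vmap/vunmap calls share one mapping.
 */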
static void *rk_cma_heap_do_vmap(struct rk_cma_heap_buffer *buffer)
{
	void *vaddr;
	pgprot_t pgprot = PAGE_KERNEL;

	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, pgprot);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static void *rk_cma_heap_vmap(struct dma_buf *dmabuf)
{
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		vaddr = buffer->vaddr;
		goto out;
	}

	vaddr = rk_cma_heap_do_vmap(buffer);
	if (IS_ERR(vaddr))
		goto out;

	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
out:
	mutex_unlock(&buffer->lock);

	return vaddr;
}

static void rk_cma_heap_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
}

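/*
 * Every exported dmabuf is recorded on heap->dmabuf_list together with its
 * physical range; the entry is added at allocation time and removed again
 * when the dmabuf is released.
 */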
static void rk_cma_heap_remove_dmabuf_list(struct dma_buf *dmabuf)
{
	struct rk_dma_heap_dmabuf *buf;
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;
	struct rk_cma_heap *cma_heap = buffer->heap;
	struct rk_dma_heap *heap = cma_heap->heap;

	mutex_lock(&heap->dmabuf_lock);
	list_for_each_entry(buf, &heap->dmabuf_list, node) {
		if (buf->dmabuf == dmabuf) {
			dma_heap_print("<%s> free dmabuf<ino-%ld>@[%pa-%pa] to heap-<%s>\n",
				       dmabuf->name,
				       dmabuf->file->f_inode->i_ino,
				       &buf->start, &buf->end,
				       rk_dma_heap_get_name(heap));
			list_del(&buf->node);
			kfree(buf);
			break;
		}
	}
	mutex_unlock(&heap->dmabuf_lock);
}

static int rk_cma_heap_add_dmabuf_list(struct dma_buf *dmabuf, const char *name)
{
	struct rk_dma_heap_dmabuf *buf;
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;
	struct rk_cma_heap *cma_heap = buffer->heap;
	struct rk_dma_heap *heap = cma_heap->heap;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&buf->node);
	buf->dmabuf = dmabuf;
	buf->start = buffer->phys;
	buf->end = buf->start + buffer->len - 1;
	mutex_lock(&heap->dmabuf_lock);
	list_add_tail(&buf->node, &heap->dmabuf_list);
	mutex_unlock(&heap->dmabuf_lock);

	dma_heap_print("<%s> alloc dmabuf<ino-%ld>@[%pa-%pa] from heap-<%s>\n",
		       dmabuf->name, dmabuf->file->f_inode->i_ino,
		       &buf->start, &buf->end, rk_dma_heap_get_name(heap));

	return 0;
}

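/*
 * Contiguous page allocations made through the alloc_contig_pages op (no
 * dmabuf involved) are tracked on heap->contig_list, tagged with the
 * requester's name or, if none was given, current->comm.
 */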
static int rk_cma_heap_remove_contig_list(struct rk_dma_heap *heap,
					  struct page *page, const char *name)
{
	struct rk_dma_heap_contig_buf *buf;

	mutex_lock(&heap->contig_lock);
	list_for_each_entry(buf, &heap->contig_list, node) {
		if (buf->start == page_to_phys(page)) {
			dma_heap_print("<%s> free contig-buf@[%pa-%pa] to heap-<%s>\n",
				       buf->orig_alloc, &buf->start, &buf->end,
				       rk_dma_heap_get_name(heap));
			list_del(&buf->node);
			kfree(buf->orig_alloc);
			kfree(buf);
			break;
		}
	}
	mutex_unlock(&heap->contig_lock);

	return 0;
}

static int rk_cma_heap_add_contig_list(struct rk_dma_heap *heap,
				       struct page *page, unsigned long size,
				       const char *name)
{
	struct rk_dma_heap_contig_buf *buf;
	const char *name_tmp;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&buf->node);
	if (!name)
		name_tmp = current->comm;
	else
		name_tmp = name;

	buf->orig_alloc = kstrndup(name_tmp, RK_DMA_HEAP_NAME_LEN, GFP_KERNEL);
	if (!buf->orig_alloc) {
		kfree(buf);
		return -ENOMEM;
	}

	buf->start = page_to_phys(page);
	buf->end = buf->start + size - 1;
	mutex_lock(&heap->contig_lock);
	list_add_tail(&buf->node, &heap->contig_list);
	mutex_unlock(&heap->contig_lock);

	dma_heap_print("<%s> alloc contig-buf@[%pa-%pa] from heap-<%s>\n",
		       buf->orig_alloc, &buf->start, &buf->end,
		       rk_dma_heap_get_name(heap));

	return 0;
}

static void rk_cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;
	struct rk_cma_heap *cma_heap = buffer->heap;
	struct rk_dma_heap *heap = cma_heap->heap;

	if (buffer->vmap_cnt > 0) {
		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
		vunmap(buffer->vaddr);
	}

	rk_cma_heap_remove_dmabuf_list(dmabuf);

	/* Free the page pointer array */
	kfree(buffer->pages);
	/* Release the CMA memory */
	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
	rk_dma_heap_total_dec(heap, buffer->len);

	kfree(buffer);
}

static const struct dma_buf_ops rk_cma_heap_buf_ops = {
	.cache_sgt_mapping = true,
	.attach = rk_cma_heap_attach,
	.detach = rk_cma_heap_detach,
	.map_dma_buf = rk_cma_heap_map_dma_buf,
	.unmap_dma_buf = rk_cma_heap_unmap_dma_buf,
	.begin_cpu_access = rk_cma_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = rk_cma_heap_dma_buf_end_cpu_access,
	.begin_cpu_access_partial = rk_cma_heap_dma_buf_begin_cpu_access_partial,
	.end_cpu_access_partial = rk_cma_heap_dma_buf_end_cpu_access_partial,
	.mmap = rk_cma_heap_mmap,
	.vmap = rk_cma_heap_vmap,
	.vunmap = rk_cma_heap_vunmap,
	.release = rk_cma_heap_dma_buf_release,
};

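/*
 * Main allocation path: grab pages from CMA, zero them, build the page
 * array needed for vmap(), export the dma-buf, perform an initial
 * dma_sync_single_for_cpu() over the whole buffer, and record the buffer
 * on the heap's dmabuf list.
 */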
static struct dma_buf *rk_cma_heap_allocate(struct rk_dma_heap *heap,
					    unsigned long len,
					    unsigned long fd_flags,
					    unsigned long heap_flags,
					    const char *name)
{
	struct rk_cma_heap *cma_heap = rk_dma_heap_get_drvdata(heap);
	struct rk_cma_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	size_t size = PAGE_ALIGN(len);
	pgoff_t pagecount = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	struct page *cma_pages;
	struct dma_buf *dmabuf;
	pgoff_t pg;
	int ret = -ENOMEM;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->len = size;

	if (align > CONFIG_DMABUF_HEAPS_ROCKCHIP_CMA_ALIGNMENT)
		align = CONFIG_DMABUF_HEAPS_ROCKCHIP_CMA_ALIGNMENT;

	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, GFP_KERNEL);
	if (!cma_pages)
		goto free_buffer;

	/* Clear the cma pages */
	if (PageHighMem(cma_pages)) {
		unsigned long nr_clear_pages = pagecount;
		struct page *page = cma_pages;

		while (nr_clear_pages > 0) {
			void *vaddr = kmap_atomic(page);

			memset(vaddr, 0, PAGE_SIZE);
			kunmap_atomic(vaddr);
			/*
			 * Avoid wasting time zeroing memory if the process
			 * has been killed by SIGKILL
			 */
			if (fatal_signal_pending(current))
				goto free_cma;
			page++;
			nr_clear_pages--;
		}
	} else {
		memset(page_address(cma_pages), 0, size);
	}

	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages),
				      GFP_KERNEL);
	if (!buffer->pages) {
		ret = -ENOMEM;
		goto free_cma;
	}

	for (pg = 0; pg < pagecount; pg++)
		buffer->pages[pg] = &cma_pages[pg];

	buffer->cma_pages = cma_pages;
	buffer->heap = cma_heap;
	buffer->pagecount = pagecount;

	/* create the dmabuf */
	exp_info.exp_name = rk_dma_heap_get_name(heap);
	exp_info.ops = &rk_cma_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}

	buffer->phys = page_to_phys(cma_pages);
	dma_sync_single_for_cpu(rk_dma_heap_get_dev(heap), buffer->phys,
				buffer->pagecount * PAGE_SIZE,
				DMA_FROM_DEVICE);

	ret = rk_cma_heap_add_dmabuf_list(dmabuf, name);
	if (ret)
		goto fail_dma_buf;

	rk_dma_heap_total_inc(heap, buffer->len);

	return dmabuf;

fail_dma_buf:
	dma_buf_put(dmabuf);
free_pages:
	kfree(buffer->pages);
free_cma:
	cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
	kfree(buffer);

	return ERR_PTR(ret);
}

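/*
 * Raw contiguous allocation for the alloc_contig_pages op: pages come
 * straight from CMA and are only tracked on the heap's contig list; no
 * dma-buf is exported.
 */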
static struct page *rk_cma_heap_allocate_pages(struct rk_dma_heap *heap,
					       size_t len, const char *name)
{
	struct rk_cma_heap *cma_heap = rk_dma_heap_get_drvdata(heap);
	size_t size = PAGE_ALIGN(len);
	pgoff_t pagecount = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	struct page *page;
	int ret;

	if (align > CONFIG_DMABUF_HEAPS_ROCKCHIP_CMA_ALIGNMENT)
		align = CONFIG_DMABUF_HEAPS_ROCKCHIP_CMA_ALIGNMENT;

	page = cma_alloc(cma_heap->cma, pagecount, align, GFP_KERNEL);
	if (!page)
		return ERR_PTR(-ENOMEM);

	ret = rk_cma_heap_add_contig_list(heap, page, size, name);
	if (ret) {
		cma_release(cma_heap->cma, page, pagecount);
		return ERR_PTR(-EINVAL);
	}

	rk_dma_heap_total_inc(heap, size);

	return page;
}

static void rk_cma_heap_free_pages(struct rk_dma_heap *heap,
				   struct page *page, size_t len,
				   const char *name)
{
	struct rk_cma_heap *cma_heap = rk_dma_heap_get_drvdata(heap);
	pgoff_t pagecount = len >> PAGE_SHIFT;

	rk_cma_heap_remove_contig_list(heap, page, name);

	cma_release(cma_heap->cma, page, pagecount);

	rk_dma_heap_total_dec(heap, len);
}

static const struct rk_dma_heap_ops rk_cma_heap_ops = {
	.allocate = rk_cma_heap_allocate,
	.alloc_contig_pages = rk_cma_heap_allocate_pages,
	.free_contig_pages = rk_cma_heap_free_pages,
};

static int cma_procfs_show(struct seq_file *s, void *private);

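/*
 * Register an rk_dma_heap on top of the given CMA area and, when the heap
 * has a procfs directory, expose the CMA allocation bitmap under
 * "alloc_bitmap".
 */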
static int __rk_add_cma_heap(struct cma *cma, void *data)
{
	struct rk_cma_heap *cma_heap;
	struct rk_dma_heap_export_info exp_info;

	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
	if (!cma_heap)
		return -ENOMEM;
	cma_heap->cma = cma;

	exp_info.name = cma_get_name(cma);
	exp_info.ops = &rk_cma_heap_ops;
	exp_info.priv = cma_heap;
	exp_info.support_cma = true;

	cma_heap->heap = rk_dma_heap_add(&exp_info);
	if (IS_ERR(cma_heap->heap)) {
		int ret = PTR_ERR(cma_heap->heap);

		kfree(cma_heap);
		return ret;
	}

	if (cma_heap->heap->procfs)
		proc_create_single_data("alloc_bitmap", 0, cma_heap->heap->procfs,
					cma_procfs_show, cma);

	return 0;
}

static int __init rk_add_default_cma_heap(void)
{
	struct cma *cma = rk_dma_heap_get_cma();

	if (WARN_ON(!cma))
		return -EINVAL;

	return __rk_add_cma_heap(cma, NULL);
}

#if defined(CONFIG_VIDEO_ROCKCHIP_THUNDER_BOOT_ISP) && !defined(CONFIG_INITCALL_ASYNC)
subsys_initcall(rk_add_default_cma_heap);
#else
module_init(rk_add_default_cma_heap);
#endif

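/*
 * Format the bitmap as zero-padded 32-bit hex words, eight words per line.
 */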
static void cma_procfs_format_array(char *buf, size_t bufsize, u32 *array, int array_size)
{
	int i = 0;

	while (--array_size >= 0) {
		size_t len;
		char term = (array_size && (++i % 8)) ? ' ' : '\n';

		len = snprintf(buf, bufsize, "%08X%c", *array++, term);
		buf += len;
		bufsize -= len;
	}
}

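/*
 * Each u32 word is rendered as 8 hex digits plus a separator (9 bytes),
 * hence the elements * 9 sizing of the scratch buffer, plus one byte for
 * the terminating NUL.
 */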
static void cma_procfs_show_bitmap(struct seq_file *s, struct cma *cma)
{
	int elements = DIV_ROUND_UP(cma_bitmap_maxno(cma), BITS_PER_BYTE * sizeof(u32));
	int size = elements * 9;
	u32 *array = (u32 *)cma->bitmap;
	char *buf;

	buf = kmalloc(size + 1, GFP_KERNEL);
	if (!buf)
		return;

	buf[size] = 0;

	cma_procfs_format_array(buf, size + 1, array, elements);
	seq_printf(s, "%s", buf);
	kfree(buf);
}

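/*
 * Used size: count the set bits under cma->lock and scale by
 * order_per_bit, since each bitmap bit covers 2^order_per_bit pages.
 */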
static u64 cma_procfs_used_get(struct cma *cma)
{
	unsigned long used;

	mutex_lock(&cma->lock);
	used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
	mutex_unlock(&cma->lock);

	return (u64)used << cma->order_per_bit;
}

static int cma_procfs_show(struct seq_file *s, void *private)
{
	struct cma *cma = s->private;
	u64 used = cma_procfs_used_get(cma);

	seq_printf(s, "Total: %lu KiB\n", cma->count << (PAGE_SHIFT - 10));
	seq_printf(s, " Used: %llu KiB\n\n", used << (PAGE_SHIFT - 10));

	cma_procfs_show_bitmap(s, cma);

	return 0;
}

MODULE_DESCRIPTION("RockChip DMA-BUF CMA Heap");
MODULE_LICENSE("GPL v2");