// SPDX-License-Identifier: GPL-2.0
/*
 * Framework for userspace DMA-BUF allocations
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (C) 2019 Linaro Ltd.
 * Copyright (C) 2022 Rockchip Electronics Co. Ltd.
 * Author: Simon Xue <xxm@rock-chips.com>
 */

#include <linux/cma.h>
#include <linux/cdev.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/xarray.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <uapi/linux/rk-dma-heap.h>

#include "rk-dma-heap.h"

#define DEVNAME "rk_dma_heap"

#define NUM_HEAP_MINORS 128

static LIST_HEAD(rk_heap_list);
static DEFINE_MUTEX(rk_heap_list_lock);
static dev_t rk_dma_heap_devt;
static struct class *rk_dma_heap_class;
static DEFINE_XARRAY_ALLOC(rk_dma_heap_minors);
struct proc_dir_entry *proc_rk_dma_heap_dir;

#define K(size) ((unsigned long)((size) >> 10))

static int rk_vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
{
        struct rk_vmap_pfn_data *data = private;

        *pte = pte_mkspecial(pfn_pte(data->pfn++, data->prot));
        return 0;
}

void *rk_vmap_contig_pfn(unsigned long pfn, unsigned int count, pgprot_t prot)
{
        struct rk_vmap_pfn_data data = { .pfn = pfn, .prot = pgprot_nx(prot) };
        struct vm_struct *area;

        area = get_vm_area_caller(count * PAGE_SIZE, VM_MAP,
                                  __builtin_return_address(0));
        if (!area)
                return NULL;
        if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
                                count * PAGE_SIZE, rk_vmap_pfn_apply, &data)) {
                free_vm_area(area);
                return NULL;
        }
        return area->addr;
}

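/*
 * Example (illustrative sketch, not part of the driver): mapping a
 * physically contiguous range into kernel virtual address space and
 * releasing it again. "pages" and "nr_pages" are hypothetical; the
 * mapping is assumed to be undone with vunmap(), which pairs with the
 * get_vm_area_caller()-based mapping created above.
 *
 *	void *vaddr;
 *
 *	vaddr = rk_vmap_contig_pfn(page_to_pfn(pages), nr_pages, PAGE_KERNEL);
 *	if (!vaddr)
 *		return -ENOMEM;
 *	memset(vaddr, 0, nr_pages * PAGE_SIZE);
 *	vunmap(vaddr);
 */
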
int rk_dma_heap_set_dev(struct device *heap_dev)
{
        int err = 0;

        if (!heap_dev)
                return -EINVAL;

        dma_coerce_mask_and_coherent(heap_dev, DMA_BIT_MASK(64));

        if (!heap_dev->dma_parms) {
                heap_dev->dma_parms = devm_kzalloc(heap_dev,
                                                   sizeof(*heap_dev->dma_parms),
                                                   GFP_KERNEL);
                if (!heap_dev->dma_parms)
                        return -ENOMEM;

                err = dma_set_max_seg_size(heap_dev, (unsigned int)DMA_BIT_MASK(64));
                if (err) {
                        devm_kfree(heap_dev, heap_dev->dma_parms);
                        dev_err(heap_dev, "Failed to set DMA segment size, err:%d\n", err);
                        return err;
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(rk_dma_heap_set_dev);

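/*
 * Example (illustrative sketch): a device that will DMA to/from heap
 * buffers can have its DMA mask and segment size widened with
 * rk_dma_heap_set_dev() before attaching to exported dma-bufs. "pdev"
 * is a hypothetical platform device.
 *
 *	err = rk_dma_heap_set_dev(&pdev->dev);
 *	if (err)
 *		return err;
 */
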
struct rk_dma_heap *rk_dma_heap_find(const char *name)
{
        struct rk_dma_heap *h;

        mutex_lock(&rk_heap_list_lock);
        list_for_each_entry(h, &rk_heap_list, list) {
                if (!strcmp(h->name, name)) {
                        kref_get(&h->refcount);
                        mutex_unlock(&rk_heap_list_lock);
                        return h;
                }
        }
        mutex_unlock(&rk_heap_list_lock);
        return NULL;
}
EXPORT_SYMBOL_GPL(rk_dma_heap_find);

void rk_dma_heap_buffer_free(struct dma_buf *dmabuf)
{
        dma_buf_put(dmabuf);
}
EXPORT_SYMBOL_GPL(rk_dma_heap_buffer_free);

struct dma_buf *rk_dma_heap_buffer_alloc(struct rk_dma_heap *heap, size_t len,
                                         unsigned int fd_flags,
                                         unsigned int heap_flags,
                                         const char *name)
{
        struct dma_buf *dmabuf;

        if (fd_flags & ~RK_DMA_HEAP_VALID_FD_FLAGS)
                return ERR_PTR(-EINVAL);

        if (heap_flags & ~RK_DMA_HEAP_VALID_HEAP_FLAGS)
                return ERR_PTR(-EINVAL);
        /*
         * Allocations from all heaps have to begin
         * and end on page boundaries.
         */
        len = PAGE_ALIGN(len);
        if (!len)
                return ERR_PTR(-EINVAL);

        dmabuf = heap->ops->allocate(heap, len, fd_flags, heap_flags, name);

        if (IS_ENABLED(CONFIG_DMABUF_RK_HEAPS_DEBUG) && !IS_ERR(dmabuf))
                dma_buf_set_name(dmabuf, name);

        return dmabuf;
}
EXPORT_SYMBOL_GPL(rk_dma_heap_buffer_alloc);

int rk_dma_heap_bufferfd_alloc(struct rk_dma_heap *heap, size_t len,
                               unsigned int fd_flags,
                               unsigned int heap_flags,
                               const char *name)
{
        struct dma_buf *dmabuf;
        int fd;

        dmabuf = rk_dma_heap_buffer_alloc(heap, len, fd_flags, heap_flags,
                                          name);
        if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);

        fd = dma_buf_fd(dmabuf, fd_flags);
        if (fd < 0) {
                dma_buf_put(dmabuf);
                /* just return, as put will call release and that will free */
        }

        return fd;
}
EXPORT_SYMBOL_GPL(rk_dma_heap_bufferfd_alloc);

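/*
 * Example (illustrative sketch): in-kernel allocation of a dma-buf fd from
 * a named heap. The heap name "rk-dma-heap-cma", the buffer tag and the
 * 1 MiB size are hypothetical; O_RDWR | O_CLOEXEC is assumed to fall
 * within RK_DMA_HEAP_VALID_FD_FLAGS.
 *
 *	struct rk_dma_heap *heap;
 *	int fd;
 *
 *	heap = rk_dma_heap_find("rk-dma-heap-cma");
 *	if (!heap)
 *		return -ENODEV;
 *	fd = rk_dma_heap_bufferfd_alloc(heap, SZ_1M, O_RDWR | O_CLOEXEC,
 *					0, "my-buffer");
 *	rk_dma_heap_put(heap);
 *	if (fd < 0)
 *		return fd;
 */
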
struct page *rk_dma_heap_alloc_contig_pages(struct rk_dma_heap *heap,
                                            size_t len, const char *name)
{
        if (WARN_ON(!heap->support_cma))
                return ERR_PTR(-EINVAL);

        len = PAGE_ALIGN(len);
        if (!len)
                return ERR_PTR(-EINVAL);

        return heap->ops->alloc_contig_pages(heap, len, name);
}
EXPORT_SYMBOL_GPL(rk_dma_heap_alloc_contig_pages);

void rk_dma_heap_free_contig_pages(struct rk_dma_heap *heap,
                                   struct page *pages, size_t len,
                                   const char *name)
{
        if (WARN_ON(!heap->support_cma))
                return;

        heap->ops->free_contig_pages(heap, pages, len, name);
}
EXPORT_SYMBOL_GPL(rk_dma_heap_free_contig_pages);

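/*
 * Example (illustrative sketch): allocating physically contiguous pages
 * from a CMA-backed heap without wrapping them in a dma-buf, then freeing
 * them. "heap" is assumed to have been obtained with rk_dma_heap_find()
 * and to have support_cma set; the size and tag are hypothetical.
 *
 *	struct page *pages;
 *
 *	pages = rk_dma_heap_alloc_contig_pages(heap, SZ_4M, "fw-carveout");
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	...
 *	rk_dma_heap_free_contig_pages(heap, pages, SZ_4M, "fw-carveout");
 */
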
void rk_dma_heap_total_inc(struct rk_dma_heap *heap, size_t len)
{
        mutex_lock(&rk_heap_list_lock);
        heap->total_size += len;
        mutex_unlock(&rk_heap_list_lock);
}

void rk_dma_heap_total_dec(struct rk_dma_heap *heap, size_t len)
{
        mutex_lock(&rk_heap_list_lock);
        if (WARN_ON(heap->total_size < len))
                heap->total_size = 0;
        else
                heap->total_size -= len;
        mutex_unlock(&rk_heap_list_lock);
}

static int rk_dma_heap_open(struct inode *inode, struct file *file)
{
        struct rk_dma_heap *heap;

        heap = xa_load(&rk_dma_heap_minors, iminor(inode));
        if (!heap) {
                pr_err("rk_dma_heap: minor %d unknown.\n", iminor(inode));
                return -ENODEV;
        }

        /* instance data as context */
        file->private_data = heap;
        nonseekable_open(inode, file);

        return 0;
}

static long rk_dma_heap_ioctl_allocate(struct file *file, void *data)
{
        struct rk_dma_heap_allocation_data *heap_allocation = data;
        struct rk_dma_heap *heap = file->private_data;
        int fd;

        if (heap_allocation->fd)
                return -EINVAL;

        fd = rk_dma_heap_bufferfd_alloc(heap, heap_allocation->len,
                                        heap_allocation->fd_flags,
                                        heap_allocation->heap_flags, NULL);
        if (fd < 0)
                return fd;

        heap_allocation->fd = fd;

        return 0;
}

static unsigned int rk_dma_heap_ioctl_cmds[] = {
        RK_DMA_HEAP_IOCTL_ALLOC,
};

static long rk_dma_heap_ioctl(struct file *file, unsigned int ucmd,
                              unsigned long arg)
{
        char stack_kdata[128];
        char *kdata = stack_kdata;
        unsigned int kcmd;
        unsigned int in_size, out_size, drv_size, ksize;
        int nr = _IOC_NR(ucmd);
        int ret = 0;

        if (nr >= ARRAY_SIZE(rk_dma_heap_ioctl_cmds))
                return -EINVAL;

        /* Get the kernel ioctl cmd that matches */
        kcmd = rk_dma_heap_ioctl_cmds[nr];

        /* Figure out the delta between user cmd size and kernel cmd size */
        drv_size = _IOC_SIZE(kcmd);
        out_size = _IOC_SIZE(ucmd);
        in_size = out_size;
        if ((ucmd & kcmd & IOC_IN) == 0)
                in_size = 0;
        if ((ucmd & kcmd & IOC_OUT) == 0)
                out_size = 0;
        ksize = max(max(in_size, out_size), drv_size);

        /* If necessary, allocate buffer for ioctl argument */
        if (ksize > sizeof(stack_kdata)) {
                kdata = kmalloc(ksize, GFP_KERNEL);
                if (!kdata)
                        return -ENOMEM;
        }

        if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {
                ret = -EFAULT;
                goto err;
        }

        /* zero out any difference between the kernel/user structure size */
        if (ksize > in_size)
                memset(kdata + in_size, 0, ksize - in_size);

        switch (kcmd) {
        case RK_DMA_HEAP_IOCTL_ALLOC:
                ret = rk_dma_heap_ioctl_allocate(file, kdata);
                break;
        default:
                ret = -ENOTTY;
                goto err;
        }

        if (copy_to_user((void __user *)arg, kdata, out_size) != 0)
                ret = -EFAULT;
err:
        if (kdata != stack_kdata)
                kfree(kdata);
        return ret;
}

static const struct file_operations rk_dma_heap_fops = {
        .owner = THIS_MODULE,
        .open = rk_dma_heap_open,
        .unlocked_ioctl = rk_dma_heap_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = rk_dma_heap_ioctl,
#endif
};

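/*
 * Example (illustrative sketch): userspace allocation through a heap's
 * character device, created as /dev/rk_dma_heap/<heap-name> by the
 * devnode callback below. The heap node name is hypothetical; the ioctl
 * number and the rk_dma_heap_allocation_data layout come from the uapi
 * header <linux/rk-dma-heap.h>, and fd_flags must be valid file flags
 * such as O_RDWR | O_CLOEXEC.
 *
 *	struct rk_dma_heap_allocation_data data = {
 *		.len = 4096,
 *		.fd_flags = O_RDWR | O_CLOEXEC,
 *	};
 *	int heap_fd = open("/dev/rk_dma_heap/rk-dma-heap-cma", O_RDONLY);
 *
 *	if (heap_fd < 0)
 *		return -errno;
 *	if (ioctl(heap_fd, RK_DMA_HEAP_IOCTL_ALLOC, &data) < 0) {
 *		close(heap_fd);
 *		return -errno;
 *	}
 *	close(heap_fd);
 *	... data.fd now refers to the new dma-buf ...
 */
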
/**
 * rk_dma_heap_get_drvdata() - get per-subdriver data for the heap
 * @heap: DMA-Heap to retrieve private data for
 *
 * Returns:
 * The per-subdriver data for the heap.
 */
void *rk_dma_heap_get_drvdata(struct rk_dma_heap *heap)
{
        return heap->priv;
}

static void rk_dma_heap_release(struct kref *ref)
{
        struct rk_dma_heap *heap = container_of(ref, struct rk_dma_heap, refcount);
        int minor = MINOR(heap->heap_devt);

        /* Note, we already hold the rk_heap_list_lock here */
        list_del(&heap->list);

        device_destroy(rk_dma_heap_class, heap->heap_devt);
        cdev_del(&heap->heap_cdev);
        xa_erase(&rk_dma_heap_minors, minor);

        kfree(heap);
}

void rk_dma_heap_put(struct rk_dma_heap *h)
{
        /*
         * Take the rk_heap_list_lock now to avoid racing with code
         * scanning the list and then taking a kref.
         */
        mutex_lock(&rk_heap_list_lock);
        kref_put(&h->refcount, rk_dma_heap_release);
        mutex_unlock(&rk_heap_list_lock);
}

/**
 * rk_dma_heap_get_dev() - get device struct for the heap
 * @heap: DMA-Heap to retrieve device struct from
 *
 * Returns:
 * The device struct for the heap.
 */
struct device *rk_dma_heap_get_dev(struct rk_dma_heap *heap)
{
        return heap->heap_dev;
}

/**
 * rk_dma_heap_get_name() - get heap name
 * @heap: DMA-Heap to retrieve the name of
 *
 * Returns:
 * The char* for the heap name.
 */
const char *rk_dma_heap_get_name(struct rk_dma_heap *heap)
{
        return heap->name;
}

struct rk_dma_heap *rk_dma_heap_add(const struct rk_dma_heap_export_info *exp_info)
{
        struct rk_dma_heap *heap, *err_ret;
        unsigned int minor;
        int ret;

        if (!exp_info->name || !strcmp(exp_info->name, "")) {
                pr_err("rk_dma_heap: Cannot add heap without a name\n");
                return ERR_PTR(-EINVAL);
        }

        if (!exp_info->ops || !exp_info->ops->allocate) {
                pr_err("rk_dma_heap: Cannot add heap with invalid ops struct\n");
                return ERR_PTR(-EINVAL);
        }

        /* check the name is unique */
        heap = rk_dma_heap_find(exp_info->name);
        if (heap) {
                pr_err("rk_dma_heap: Already registered heap named %s\n",
                       exp_info->name);
                rk_dma_heap_put(heap);
                return ERR_PTR(-EINVAL);
        }

        heap = kzalloc(sizeof(*heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);

        kref_init(&heap->refcount);
        heap->name = exp_info->name;
        heap->ops = exp_info->ops;
        heap->priv = exp_info->priv;
        heap->support_cma = exp_info->support_cma;
        INIT_LIST_HEAD(&heap->dmabuf_list);
        INIT_LIST_HEAD(&heap->contig_list);
        mutex_init(&heap->dmabuf_lock);
        mutex_init(&heap->contig_lock);

        /* Find unused minor number */
        ret = xa_alloc(&rk_dma_heap_minors, &minor, heap,
                       XA_LIMIT(0, NUM_HEAP_MINORS - 1), GFP_KERNEL);
        if (ret < 0) {
                pr_err("rk_dma_heap: Unable to get minor number for heap\n");
                err_ret = ERR_PTR(ret);
                goto err0;
        }

        /* Create device */
        heap->heap_devt = MKDEV(MAJOR(rk_dma_heap_devt), minor);

        cdev_init(&heap->heap_cdev, &rk_dma_heap_fops);
        ret = cdev_add(&heap->heap_cdev, heap->heap_devt, 1);
        if (ret < 0) {
                pr_err("rk_dma_heap: Unable to add char device\n");
                err_ret = ERR_PTR(ret);
                goto err1;
        }

        heap->heap_dev = device_create(rk_dma_heap_class,
                                       NULL,
                                       heap->heap_devt,
                                       NULL,
                                       heap->name);
        if (IS_ERR(heap->heap_dev)) {
                pr_err("rk_dma_heap: Unable to create device\n");
                err_ret = ERR_CAST(heap->heap_dev);
                goto err2;
        }

        heap->procfs = proc_rk_dma_heap_dir;

        /* Make sure it doesn't disappear on us */
        heap->heap_dev = get_device(heap->heap_dev);

        /* Add heap to the list */
        mutex_lock(&rk_heap_list_lock);
        list_add(&heap->list, &rk_heap_list);
        mutex_unlock(&rk_heap_list_lock);

        return heap;

err2:
        cdev_del(&heap->heap_cdev);
err1:
        xa_erase(&rk_dma_heap_minors, minor);
err0:
        kfree(heap);
        return err_ret;
}

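/*
 * Example (illustrative sketch): how a heap subdriver could register
 * itself. The ops type is assumed to be named rk_dma_heap_ops as declared
 * in rk-dma-heap.h; my_heap_ops, my_heap_allocate and "my-heap" are
 * hypothetical names, and only the mandatory allocate() callback is shown.
 *
 *	static const struct rk_dma_heap_ops my_heap_ops = {
 *		.allocate = my_heap_allocate,
 *	};
 *
 *	static int my_heap_create(void)
 *	{
 *		struct rk_dma_heap_export_info exp_info = {
 *			.name = "my-heap",
 *			.ops = &my_heap_ops,
 *			.priv = NULL,
 *			.support_cma = false,
 *		};
 *		struct rk_dma_heap *heap;
 *
 *		heap = rk_dma_heap_add(&exp_info);
 *		return PTR_ERR_OR_ZERO(heap);
 *	}
 */
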
static char *rk_dma_heap_devnode(struct device *dev, umode_t *mode)
{
        return kasprintf(GFP_KERNEL, "rk_dma_heap/%s", dev_name(dev));
}

static int rk_dma_heap_dump_dmabuf(const struct dma_buf *dmabuf, void *data)
{
        struct rk_dma_heap *heap = (struct rk_dma_heap *)data;
        struct rk_dma_heap_dmabuf *buf;
        struct dma_buf_attachment *a;
        phys_addr_t size;
        int attach_count;
        int ret;

        if (!strcmp(dmabuf->exp_name, heap->name)) {
                seq_printf(heap->s, "dma-heap:<%s> -dmabuf", heap->name);
                mutex_lock(&heap->dmabuf_lock);
                list_for_each_entry(buf, &heap->dmabuf_list, node) {
                        if (buf->dmabuf->file->f_inode->i_ino ==
                            dmabuf->file->f_inode->i_ino) {
                                seq_printf(heap->s,
                                           "\ti_ino = %lu\n",
                                           dmabuf->file->f_inode->i_ino);
                                size = buf->end - buf->start + 1;
                                seq_printf(heap->s,
                                           "\tAlloc by (%-20s)\t[%pa-%pa]\t%pa (%lu KiB)\n",
                                           dmabuf->name, &buf->start,
                                           &buf->end, &size, K(size));
                                seq_puts(heap->s, "\t\tAttached Devices:\n");
                                attach_count = 0;
                                ret = dma_resv_lock_interruptible(dmabuf->resv,
                                                                  NULL);
                                if (ret)
                                        goto error_unlock;
                                list_for_each_entry(a, &dmabuf->attachments,
                                                    node) {
                                        seq_printf(heap->s, "\t\t%s\n",
                                                   dev_name(a->dev));
                                        attach_count++;
                                }
                                dma_resv_unlock(dmabuf->resv);
                                seq_printf(heap->s,
                                           "Total %d devices attached\n\n",
                                           attach_count);
                        }
                }
                mutex_unlock(&heap->dmabuf_lock);
        }

        return 0;

error_unlock:
        mutex_unlock(&heap->dmabuf_lock);
        return ret;
}

static int rk_dma_heap_dump_contig(void *data)
{
        struct rk_dma_heap *heap = (struct rk_dma_heap *)data;
        struct rk_dma_heap_contig_buf *buf;
        phys_addr_t size;

        mutex_lock(&heap->contig_lock);
        list_for_each_entry(buf, &heap->contig_list, node) {
                size = buf->end - buf->start + 1;
                seq_printf(heap->s, "dma-heap:<%s> -non dmabuf\n", heap->name);
                seq_printf(heap->s, "\tAlloc by (%-20s)\t[%pa-%pa]\t%pa (%lu KiB)\n",
                           buf->orig_alloc, &buf->start, &buf->end, &size, K(size));
        }
        mutex_unlock(&heap->contig_lock);

        return 0;
}

static ssize_t rk_total_pools_kb_show(struct kobject *kobj,
                                      struct kobj_attribute *attr, char *buf)
{
        struct rk_dma_heap *heap;
        u64 total_pool_size = 0;

        mutex_lock(&rk_heap_list_lock);
        list_for_each_entry(heap, &rk_heap_list, list)
                if (heap->ops->get_pool_size)
                        total_pool_size += heap->ops->get_pool_size(heap);
        mutex_unlock(&rk_heap_list_lock);

        return sysfs_emit(buf, "%llu\n", total_pool_size / 1024);
}

static struct kobj_attribute rk_total_pools_kb_attr =
        __ATTR_RO(rk_total_pools_kb);

static struct attribute *rk_dma_heap_sysfs_attrs[] = {
        &rk_total_pools_kb_attr.attr,
        NULL,
};

ATTRIBUTE_GROUPS(rk_dma_heap_sysfs);

static struct kobject *rk_dma_heap_kobject;

static int rk_dma_heap_sysfs_setup(void)
{
        int ret;

        rk_dma_heap_kobject = kobject_create_and_add("rk_dma_heap",
                                                      kernel_kobj);
        if (!rk_dma_heap_kobject)
                return -ENOMEM;

        ret = sysfs_create_groups(rk_dma_heap_kobject,
                                  rk_dma_heap_sysfs_groups);
        if (ret) {
                kobject_put(rk_dma_heap_kobject);
                return ret;
        }

        return 0;
}

static void rk_dma_heap_sysfs_teardown(void)
{
        kobject_put(rk_dma_heap_kobject);
}

#ifdef CONFIG_DEBUG_FS

static struct dentry *rk_dma_heap_debugfs_dir;

static int rk_dma_heap_debug_show(struct seq_file *s, void *unused)
{
        struct rk_dma_heap *heap;
        unsigned long total = 0;

        mutex_lock(&rk_heap_list_lock);
        list_for_each_entry(heap, &rk_heap_list, list) {
                heap->s = s;
                get_each_dmabuf(rk_dma_heap_dump_dmabuf, heap);
                rk_dma_heap_dump_contig(heap);
                total += heap->total_size;
        }
        seq_printf(s, "\nTotal : 0x%lx (%lu KiB)\n", total, K(total));
        mutex_unlock(&rk_heap_list_lock);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(rk_dma_heap_debug);

static int rk_dma_heap_init_debugfs(void)
{
        struct dentry *d;
        int err = 0;

        d = debugfs_create_dir("rk_dma_heap", NULL);
        if (IS_ERR(d))
                return PTR_ERR(d);

        rk_dma_heap_debugfs_dir = d;

        d = debugfs_create_file("dma_heap_info", 0444,
                                rk_dma_heap_debugfs_dir, NULL,
                                &rk_dma_heap_debug_fops);
        if (IS_ERR(d)) {
                dma_heap_print("rk_dma_heap: debugfs: failed to create node bufinfo\n");
                debugfs_remove_recursive(rk_dma_heap_debugfs_dir);
                rk_dma_heap_debugfs_dir = NULL;
                err = PTR_ERR(d);
        }

        return err;
}
#else
static inline int rk_dma_heap_init_debugfs(void)
{
        return 0;
}
#endif

static int rk_dma_heap_proc_show(struct seq_file *s, void *unused)
{
        struct rk_dma_heap *heap;
        unsigned long total = 0;

        mutex_lock(&rk_heap_list_lock);
        list_for_each_entry(heap, &rk_heap_list, list) {
                heap->s = s;
                get_each_dmabuf(rk_dma_heap_dump_dmabuf, heap);
                rk_dma_heap_dump_contig(heap);
                total += heap->total_size;
        }
        seq_printf(s, "\nTotal : 0x%lx (%lu KiB)\n", total, K(total));
        mutex_unlock(&rk_heap_list_lock);

        return 0;
}

static int rk_dma_heap_info_proc_open(struct inode *inode,
                                      struct file *file)
{
        return single_open(file, rk_dma_heap_proc_show, NULL);
}

static const struct proc_ops rk_dma_heap_info_proc_fops = {
        .proc_open = rk_dma_heap_info_proc_open,
        .proc_read = seq_read,
        .proc_lseek = seq_lseek,
        .proc_release = single_release,
};

static int rk_dma_heap_init_proc(void)
{
        proc_rk_dma_heap_dir = proc_mkdir("rk_dma_heap", NULL);
        if (!proc_rk_dma_heap_dir) {
                pr_err("rk_dma_heap: failed to create proc dir\n");
                return -ENOENT;
        }

        proc_create("dma_heap_info", 0644, proc_rk_dma_heap_dir,
                    &rk_dma_heap_info_proc_fops);

        return 0;
}

static int rk_dma_heap_init(void)
{
        int ret;

        ret = rk_dma_heap_sysfs_setup();
        if (ret)
                return ret;

        ret = alloc_chrdev_region(&rk_dma_heap_devt, 0, NUM_HEAP_MINORS,
                                  DEVNAME);
        if (ret)
                goto err_chrdev;

        rk_dma_heap_class = class_create(THIS_MODULE, DEVNAME);
        if (IS_ERR(rk_dma_heap_class)) {
                ret = PTR_ERR(rk_dma_heap_class);
                goto err_class;
        }
        rk_dma_heap_class->devnode = rk_dma_heap_devnode;

        rk_dma_heap_init_debugfs();
        rk_dma_heap_init_proc();

        return 0;

err_class:
        unregister_chrdev_region(rk_dma_heap_devt, NUM_HEAP_MINORS);
err_chrdev:
        rk_dma_heap_sysfs_teardown();
        return ret;
}
subsys_initcall(rk_dma_heap_init);