// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 *
 * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * Also utilizing parts of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 */
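
/*
 * Userspace consumes this heap through the dma-heap character device.
 * A minimal allocation sketch (assuming the default CMA area shows up as
 * /dev/dma_heap/reserved; the node name follows cma_get_name()):
 *
 *	struct dma_heap_allocation_data data = {
 *		.len = 4096,
 *		.fd_flags = O_RDWR | O_CLOEXEC,
 *	};
 *	int heap_fd = open("/dev/dma_heap/reserved", O_RDWR);
 *	int ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
 *
 * On success, data.fd is a dma-buf file descriptor backed by this heap.
 */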
#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

struct cma_heap {
	struct dma_heap *heap;
	struct cma *cma;
};

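/*
 * Per-buffer state: the CMA allocation itself (cma_pages), a flat page
 * array used to build attachment sg-tables and to service mmap faults,
 * the list of device attachments, and a refcounted kernel vmap.
 */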
struct cma_heap_buffer {
	struct cma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct page *cma_pages;
	struct page **pages;
	pgoff_t pagecount;
	int vmap_cnt;
	void *vaddr;
};

struct dma_heap_attachment {
	struct device *dev;
	struct sg_table table;
	struct list_head list;
	bool mapped;
};

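/*
 * attach/detach: each attachment gets its own sg_table built from the
 * buffer's page array, so every device can be mapped independently. The
 * attachment is tracked on buffer->attachments for CPU-access syncing.
 */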
static int cma_heap_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
					buffer->pagecount, 0,
					buffer->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret) {
		kfree(a);
		return ret;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void cma_heap_detach(struct dma_buf *dmabuf,
			    struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(&a->table);
	kfree(a);
}

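/*
 * map/unmap: map the attachment's private sg_table for DMA, passing the
 * attachment's dma_map_attrs through to dma_map_sgtable(). a->mapped is
 * checked by begin/end_cpu_access to skip attachments that are not
 * currently mapped.
 */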
static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = &a->table;
	int attrs = attachment->dma_map_attrs;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, attrs);
	if (ret)
		return ERR_PTR(-ENOMEM);
	a->mapped = true;
	return table;
}

static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	int attrs = attachment->dma_map_attrs;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, attrs);
}

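/*
 * CPU access bracketing: sync every mapped attachment (and any kernel
 * vmap) so that CPU reads see device writes (begin) and device reads see
 * CPU writes (end).
 */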
static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					     enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					   enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

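/*
 * Userspace mapping is fault-driven: each fault hands back one page from
 * the buffer's page array with an extra reference taken.
 */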
static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct cma_heap_buffer *buffer = vma->vm_private_data;

	if (vmf->pgoff >= buffer->pagecount)
		return VM_FAULT_SIGBUS;

	vmf->page = buffer->pages[vmf->pgoff];
	get_page(vmf->page);

	return 0;
}

static const struct vm_operations_struct dma_heap_vm_ops = {
	.fault = cma_heap_vm_fault,
};

static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &dma_heap_vm_ops;
	vma->vm_private_data = buffer;

	return 0;
}

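/*
 * Kernel mapping: vmap() the page array once and hand out the same
 * address while vmap_cnt stays non-zero.
 */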
static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
{
	void *vaddr;

	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static void *cma_heap_vmap(struct dma_buf *dmabuf)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		vaddr = buffer->vaddr;
		goto out;
	}

	vaddr = cma_heap_do_vmap(buffer);
	if (IS_ERR(vaddr))
		goto out;

	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
out:
	mutex_unlock(&buffer->lock);

	return vaddr;
}

static void cma_heap_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
}

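/*
 * Final release: warn about (and drop) any leaked kernel vmap, free the
 * page array and return the pages to the CMA area.
 */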
static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct cma_heap *cma_heap = buffer->heap;

	if (buffer->vmap_cnt > 0) {
		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
		vunmap(buffer->vaddr);
	}

	/* free page list */
	kfree(buffer->pages);
	/* release memory */
	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
	kfree(buffer);
}

static const struct dma_buf_ops cma_heap_buf_ops = {
	.attach = cma_heap_attach,
	.detach = cma_heap_detach,
	.map_dma_buf = cma_heap_map_dma_buf,
	.unmap_dma_buf = cma_heap_unmap_dma_buf,
	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
	.mmap = cma_heap_mmap,
	.vmap = cma_heap_vmap,
	.vunmap = cma_heap_vunmap,
	.release = cma_heap_dma_buf_release,
};

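/*
 * Allocate pagecount pages from the heap's CMA area, zero them, build the
 * flat page array and export the result as a dma-buf.
 */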
static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
					 unsigned long len,
					 unsigned long fd_flags,
					 unsigned long heap_flags)
{
	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
	struct cma_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	size_t size = PAGE_ALIGN(len);
	pgoff_t pagecount = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	struct page *cma_pages;
	struct dma_buf *dmabuf;
	int ret = -ENOMEM;
	pgoff_t pg;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->len = size;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
	if (!cma_pages)
		goto free_buffer;

	/* Clear the cma pages */
	if (PageHighMem(cma_pages)) {
		unsigned long nr_clear_pages = pagecount;
		struct page *page = cma_pages;

		while (nr_clear_pages > 0) {
			void *vaddr = kmap_atomic(page);

			memset(vaddr, 0, PAGE_SIZE);
			kunmap_atomic(vaddr);
			/*
			 * Avoid wasting time zeroing memory if the process
			 * has been killed by SIGKILL.
			 */
			if (fatal_signal_pending(current))
				goto free_cma;
			page++;
			nr_clear_pages--;
		}
	} else {
		memset(page_address(cma_pages), 0, size);
	}

	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
	if (!buffer->pages) {
		ret = -ENOMEM;
		goto free_cma;
	}

	for (pg = 0; pg < pagecount; pg++)
		buffer->pages[pg] = &cma_pages[pg];

	buffer->cma_pages = cma_pages;
	buffer->heap = cma_heap;
	buffer->pagecount = pagecount;

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &cma_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}

	return dmabuf;

free_pages:
	kfree(buffer->pages);
free_cma:
	cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
	kfree(buffer);

	return ERR_PTR(ret);
}

static const struct dma_heap_ops cma_heap_ops = {
	.allocate = cma_heap_allocate,
};

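/*
 * Register one dma-heap per CMA area; only the default CMA area (if any)
 * is added at module init below.
 */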
static int __add_cma_heap(struct cma *cma, void *data)
{
	struct cma_heap *cma_heap;
	struct dma_heap_export_info exp_info;

	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
	if (!cma_heap)
		return -ENOMEM;
	cma_heap->cma = cma;

	exp_info.name = cma_get_name(cma);
	exp_info.ops = &cma_heap_ops;
	exp_info.priv = cma_heap;

	cma_heap->heap = dma_heap_add(&exp_info);
	if (IS_ERR(cma_heap->heap)) {
		int ret = PTR_ERR(cma_heap->heap);

		kfree(cma_heap);
		return ret;
	}

	return 0;
}

static int add_default_cma_heap(void)
{
	struct cma *default_cma = dev_get_cma_area(NULL);
	int ret = 0;

	if (default_cma)
		ret = __add_cma_heap(default_cma, NULL);

	return ret;
}
module_init(add_default_cma_heap);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");
MODULE_LICENSE("GPL v2");