// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 *
 * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
 * Author: Benjamin Gaignard <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * Also utilizing parts of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 *
 * Copyright (C) 2021, 2022 Rockchip Electronics Co. Ltd.
 */

#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <uapi/linux/dma-heap.h>

struct cma_heap {
	struct dma_heap *heap;
	struct cma *cma;
};

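/*
 * struct cma_heap_buffer - per-buffer state for a CMA heap allocation
 *
 * @lock protects @attachments, @vmap_cnt and @vaddr. @cma_pages is the head
 * of the contiguous CMA allocation, while @pages is a flat array of the
 * individual page pointers used for sg-table construction, vmap() and the
 * mmap fault handler. @uncached marks buffers that are mapped
 * write-combined and skip CPU cache maintenance on map/unmap.
 */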
struct cma_heap_buffer {
	struct cma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct page *cma_pages;
	struct page **pages;
	pgoff_t pagecount;
	int vmap_cnt;
	void *vaddr;

	bool uncached;
};

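/*
 * struct dma_heap_attachment - per-device attachment state
 *
 * Each attachment gets its own sg_table built from the buffer's page array.
 * @mapped tracks whether that table is currently DMA-mapped so the
 * CPU-access ops only sync attachments a device is actively using.
 */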
struct dma_heap_attachment {
	struct device *dev;
	struct sg_table table;
	struct list_head list;
	bool mapped;

	bool uncached;
};

static int cma_heap_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
					buffer->pagecount, 0,
					buffer->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret) {
		kfree(a);
		return ret;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	a->uncached = buffer->uncached;
	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void cma_heap_detach(struct dma_buf *dmabuf,
			    struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(&a->table);
	kfree(a);
}

static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = &a->table;
	int attrs = attachment->dma_map_attrs;
	int ret;

	if (a->uncached)
		attrs |= DMA_ATTR_SKIP_CPU_SYNC;

	ret = dma_map_sgtable(attachment->dev, table, direction, attrs);
	if (ret)
		return ERR_PTR(-ENOMEM);
	a->mapped = true;
	return table;
}

static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	int attrs = attachment->dma_map_attrs;

	a->mapped = false;

	if (a->uncached)
		attrs |= DMA_ATTR_SKIP_CPU_SYNC;

	dma_unmap_sgtable(attachment->dev, table, direction, attrs);
}

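/*
 * Partial CPU-access ops: sync only the [offset, offset + len) range of the
 * buffer through the heap device. They are wired into the dma_buf ops below
 * only when CONFIG_DMABUF_PARTIAL is enabled; uncached buffers need no cache
 * maintenance and return early.
 */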
static int __maybe_unused
cma_heap_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
					   enum dma_data_direction direction,
					   unsigned int offset,
					   unsigned int len)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	phys_addr_t phys = page_to_phys(buffer->cma_pages);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	if (buffer->uncached)
		return 0;

	mutex_lock(&buffer->lock);
	dma_sync_single_for_cpu(dma_heap_get_dev(buffer->heap->heap),
				phys + offset,
				len,
				direction);
	mutex_unlock(&buffer->lock);

	return 0;
}

static int __maybe_unused
cma_heap_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
					enum dma_data_direction direction,
					unsigned int offset,
					unsigned int len)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	phys_addr_t phys = page_to_phys(buffer->cma_pages);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	if (buffer->uncached)
		return 0;

	mutex_lock(&buffer->lock);
	dma_sync_single_for_device(dma_heap_get_dev(buffer->heap->heap),
				   phys + offset,
				   len,
				   direction);
	mutex_unlock(&buffer->lock);

	return 0;
}

static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					     enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					   enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

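/*
 * Userspace mappings are populated lazily: the fault handler hands out one
 * page of the buffer at a time and takes a reference on it.
 */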
static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct cma_heap_buffer *buffer = vma->vm_private_data;

	if (vmf->pgoff >= buffer->pagecount)
		return VM_FAULT_SIGBUS;

	vmf->page = buffer->pages[vmf->pgoff];
	get_page(vmf->page);

	return 0;
}

static const struct vm_operations_struct dma_heap_vm_ops = {
	.fault = cma_heap_vm_fault,
};

static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	if (buffer->uncached)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	vma->vm_ops = &dma_heap_vm_ops;
	vma->vm_private_data = buffer;

	return 0;
}

static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
{
	void *vaddr;
	pgprot_t pgprot = PAGE_KERNEL;

	if (buffer->uncached)
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, pgprot);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static void *cma_heap_vmap(struct dma_buf *dmabuf)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		vaddr = buffer->vaddr;
		goto out;
	}

	vaddr = cma_heap_do_vmap(buffer);
	if (IS_ERR(vaddr))
		goto out;

	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
out:
	mutex_unlock(&buffer->lock);

	return vaddr;
}

static void cma_heap_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
}

static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct cma_heap *cma_heap = buffer->heap;

	if (buffer->vmap_cnt > 0) {
		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
		vunmap(buffer->vaddr);
	}

	/* free page list */
	kfree(buffer->pages);
	/* release memory */
	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
	kfree(buffer);
}

static const struct dma_buf_ops cma_heap_buf_ops = {
	.attach = cma_heap_attach,
	.detach = cma_heap_detach,
	.map_dma_buf = cma_heap_map_dma_buf,
	.unmap_dma_buf = cma_heap_unmap_dma_buf,
	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
#ifdef CONFIG_DMABUF_PARTIAL
	.begin_cpu_access_partial = cma_heap_dma_buf_begin_cpu_access_partial,
	.end_cpu_access_partial = cma_heap_dma_buf_end_cpu_access_partial,
#endif
	.mmap = cma_heap_mmap,
	.vmap = cma_heap_vmap,
	.vunmap = cma_heap_vunmap,
	.release = cma_heap_dma_buf_release,
};

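/*
 * Common allocation path for both the cached and uncached heaps: allocate
 * from the CMA area, zero the memory (page by page through kmap_atomic()
 * when the pages are in highmem), build the flat page array and export the
 * dma-buf. Uncached buffers additionally get their CPU caches cleaned once
 * up front via a dummy dma_map_page()/dma_unmap_page() pair on the heap
 * device.
 */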
static struct dma_buf *cma_heap_do_allocate(struct dma_heap *heap,
					    unsigned long len,
					    unsigned long fd_flags,
					    unsigned long heap_flags, bool uncached)
{
	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
	struct cma_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	size_t size = PAGE_ALIGN(len);
	pgoff_t pagecount = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	struct page *cma_pages;
	struct dma_buf *dmabuf;
	int ret = -ENOMEM;
	pgoff_t pg;
	dma_addr_t dma;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->uncached = uncached;

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->len = size;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, GFP_KERNEL);
	if (!cma_pages)
		goto free_buffer;

	/* Clear the cma pages */
	if (PageHighMem(cma_pages)) {
		unsigned long nr_clear_pages = pagecount;
		struct page *page = cma_pages;

		while (nr_clear_pages > 0) {
			void *vaddr = kmap_atomic(page);

			memset(vaddr, 0, PAGE_SIZE);
			kunmap_atomic(vaddr);
			/*
			 * Avoid wasting time zeroing memory if the process
			 * has been killed by SIGKILL.
			 */
			if (fatal_signal_pending(current))
				goto free_cma;
			page++;
			nr_clear_pages--;
		}
	} else {
		memset(page_address(cma_pages), 0, size);
	}

	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
	if (!buffer->pages) {
		ret = -ENOMEM;
		goto free_cma;
	}

	for (pg = 0; pg < pagecount; pg++)
		buffer->pages[pg] = &cma_pages[pg];

	buffer->cma_pages = cma_pages;
	buffer->heap = cma_heap;
	buffer->pagecount = pagecount;

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &cma_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}

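	/*
	 * For uncached buffers, the map/unmap pair below is issued only for
	 * its cache-maintenance side effect: it ensures no stale or dirty
	 * CPU cache lines cover the freshly zeroed buffer before it is
	 * handed out with uncached/write-combined mappings. The DMA mapping
	 * itself is not kept.
	 */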
	if (buffer->uncached) {
		dma = dma_map_page(dma_heap_get_dev(heap), buffer->cma_pages, 0,
				   buffer->pagecount * PAGE_SIZE, DMA_FROM_DEVICE);
		dma_unmap_page(dma_heap_get_dev(heap), dma,
			       buffer->pagecount * PAGE_SIZE, DMA_FROM_DEVICE);
	}

	return dmabuf;

free_pages:
	kfree(buffer->pages);
free_cma:
	cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
	kfree(buffer);

	return ERR_PTR(ret);
}

static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
					 unsigned long len,
					 unsigned long fd_flags,
					 unsigned long heap_flags)
{
	return cma_heap_do_allocate(heap, len, fd_flags, heap_flags, false);
}

#if IS_ENABLED(CONFIG_NO_GKI)
static int cma_heap_get_phys(struct dma_heap *heap,
			     struct dma_heap_phys_data *phys)
{
	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
	struct cma_heap_buffer *buffer;
	struct dma_buf *dmabuf;

	if (IS_ERR_OR_NULL(phys))
		return -EINVAL;

	phys->paddr = (__u64)-1;

	dmabuf = dma_buf_get(phys->fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return -EBADFD;

	buffer = dmabuf->priv;
	if (IS_ERR_OR_NULL(buffer))
		goto err;

	if (buffer->heap != cma_heap)
		goto err;

	phys->paddr = page_to_phys(buffer->cma_pages);

err:
	dma_buf_put(dmabuf);

	return (phys->paddr == (__u64)-1) ? -EINVAL : 0;
}
#endif

static const struct dma_heap_ops cma_heap_ops = {
	.allocate = cma_heap_allocate,
#if IS_ENABLED(CONFIG_NO_GKI)
	.get_phys = cma_heap_get_phys,
#endif
};

static struct dma_buf *cma_uncached_heap_allocate(struct dma_heap *heap,
						  unsigned long len,
						  unsigned long fd_flags,
						  unsigned long heap_flags)
{
	return cma_heap_do_allocate(heap, len, fd_flags, heap_flags, true);
}

static struct dma_buf *cma_uncached_heap_not_initialized(struct dma_heap *heap,
							 unsigned long len,
							 unsigned long fd_flags,
							 unsigned long heap_flags)
{
	pr_info("heap %s not initialized\n", dma_heap_get_name(heap));
	return ERR_PTR(-EBUSY);
}

static struct dma_heap_ops cma_uncached_heap_ops = {
	.allocate = cma_uncached_heap_not_initialized,
};

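/*
 * The uncached heap uses its dma_heap device for dma_map/dma_sync calls, so
 * give that device a 64-bit DMA mask and dma_parms (for the maximum segment
 * size) before the heap starts handing out buffers.
 */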
static int set_heap_dev_dma(struct device *heap_dev)
{
	int err = 0;

	if (!heap_dev)
		return -EINVAL;

	dma_coerce_mask_and_coherent(heap_dev, DMA_BIT_MASK(64));

	if (!heap_dev->dma_parms) {
		heap_dev->dma_parms = devm_kzalloc(heap_dev,
						   sizeof(*heap_dev->dma_parms),
						   GFP_KERNEL);
		if (!heap_dev->dma_parms)
			return -ENOMEM;

		err = dma_set_max_seg_size(heap_dev, (unsigned int)DMA_BIT_MASK(64));
		if (err) {
			devm_kfree(heap_dev, heap_dev->dma_parms);
			dev_err(heap_dev, "Failed to set DMA segment size, err:%d\n", err);
			return err;
		}
	}

	return 0;
}

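/*
 * Register two heaps on top of the same CMA area: "cma" for cached buffers
 * and "cma-uncached" for write-combined ones. The uncached heap's allocate
 * op starts out as a stub returning -EBUSY and is only switched to the real
 * allocator after its device's DMA mask has been configured, with a barrier
 * ordering the two writes.
 */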
static int __add_cma_heap(struct cma *cma, void *data)
{
	struct cma_heap *cma_heap, *cma_uncached_heap;
	struct dma_heap_export_info exp_info;
	int ret;

	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
	if (!cma_heap)
		return -ENOMEM;
	cma_heap->cma = cma;

	exp_info.name = "cma";
	exp_info.ops = &cma_heap_ops;
	exp_info.priv = cma_heap;

	cma_heap->heap = dma_heap_add(&exp_info);
	if (IS_ERR(cma_heap->heap)) {
		ret = PTR_ERR(cma_heap->heap);
		goto free_cma_heap;
	}

	cma_uncached_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
	if (!cma_uncached_heap) {
		ret = -ENOMEM;
		goto put_cma_heap;
	}

	cma_uncached_heap->cma = cma;

	exp_info.name = "cma-uncached";
	exp_info.ops = &cma_uncached_heap_ops;
	exp_info.priv = cma_uncached_heap;

	cma_uncached_heap->heap = dma_heap_add(&exp_info);
	if (IS_ERR(cma_uncached_heap->heap)) {
		ret = PTR_ERR(cma_uncached_heap->heap);
		goto free_uncached_cma_heap;
	}

	ret = set_heap_dev_dma(dma_heap_get_dev(cma_uncached_heap->heap));
	if (ret)
		goto put_uncached_cma_heap;

	mb(); /* make sure we only set allocate after dma_mask is set */
	cma_uncached_heap_ops.allocate = cma_uncached_heap_allocate;

	return 0;

put_uncached_cma_heap:
	dma_heap_put(cma_uncached_heap->heap);
free_uncached_cma_heap:
	kfree(cma_uncached_heap);
put_cma_heap:
	dma_heap_put(cma_heap->heap);
free_cma_heap:
	kfree(cma_heap);

	return ret;
}

static int add_default_cma_heap(void)
{
	struct cma *default_cma = dev_get_cma_area(NULL);
	int ret = 0;

	if (default_cma)
		ret = __add_cma_heap(default_cma, NULL);

	return ret;
}
module_init(add_default_cma_heap);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");
MODULE_LICENSE("GPL");