// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 *
 * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
 * Author: Benjamin Gaignard <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * Also utilizing parts of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 *
 * Copyright (C) 2022 Rockchip Electronics Co. Ltd.
 * Author: Simon Xue <xxm@rock-chips.com>
 */

#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <uapi/linux/rk-dma-heap.h>

#include "../../../mm/cma.h"
#include "rk-dma-heap.h"

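/*
 * One rk_cma_heap is registered per backing CMA area. Each exported buffer
 * is described by a rk_cma_heap_buffer, and every device attachment gets
 * its own rk_cma_heap_attachment with a private sg_table.
 */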
struct rk_cma_heap {
	struct rk_dma_heap *heap;
	struct cma *cma;
};

struct rk_cma_heap_buffer {
	struct rk_cma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct page *cma_pages;
	struct page **pages;
	pgoff_t pagecount;
	int vmap_cnt;
	void *vaddr;
	phys_addr_t phys;
	bool attached;
};

struct rk_cma_heap_attachment {
	struct device *dev;
	struct sg_table table;
	struct list_head list;
	bool mapped;
};

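/*
 * Wrap the physically contiguous CMA allocation in a single-entry sg_table
 * and record the new attachment on the buffer's attachment list.
 */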
static int rk_cma_heap_attach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attachment)
{
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;
	struct rk_cma_heap_attachment *a;
	struct sg_table *table;
	size_t size = buffer->pagecount << PAGE_SHIFT;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = &a->table;

	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(a);
		return ret;
	}
	sg_set_page(table->sgl, buffer->cma_pages, PAGE_ALIGN(size), 0);

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	buffer->attached = true;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void rk_cma_heap_detach(struct dma_buf *dmabuf,
			       struct dma_buf_attachment *attachment)
{
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;
	struct rk_cma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	buffer->attached = false;

	sg_free_table(&a->table);
	kfree(a);
}

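/*
 * Map/unmap the attachment's sg_table for DMA. The caller's dma_map_attrs
 * are passed through unchanged, and a->mapped tracks whether the table is
 * currently mapped so cache maintenance can skip idle attachments.
 */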
static struct sg_table *rk_cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
						enum dma_data_direction direction)
{
	struct rk_cma_heap_attachment *a = attachment->priv;
	struct sg_table *table = &a->table;
	int attrs = attachment->dma_map_attrs;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, attrs);
	if (ret)
		return ERR_PTR(-ENOMEM);
	a->mapped = true;
	return table;
}

static void rk_cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *table,
				      enum dma_data_direction direction)
{
	struct rk_cma_heap_attachment *a = attachment->priv;
	int attrs = attachment->dma_map_attrs;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, attrs);
}

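/*
 * CPU access synchronisation. Mapped attachments are synced through their
 * sg_tables; if userspace touches the buffer before any device has attached,
 * the sync is done against the heap's own device using the buffer's physical
 * address and the requested offset/length window.
 */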
static int
rk_cma_heap_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
					     enum dma_data_direction direction,
					     unsigned int offset,
					     unsigned int len)
{
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;
	struct rk_cma_heap_attachment *a;

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
	}

	/* For userspace that has not attached yet */
	if (buffer->phys && !buffer->attached)
		dma_sync_single_for_cpu(rk_dma_heap_get_dev(buffer->heap->heap),
					buffer->phys + offset,
					len,
					direction);
	mutex_unlock(&buffer->lock);

	return 0;
}

static int
rk_cma_heap_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
					   enum dma_data_direction direction,
					   unsigned int offset,
					   unsigned int len)
{
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;
	struct rk_cma_heap_attachment *a;

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
	}

	/* For userspace that has not attached yet */
	if (buffer->phys && !buffer->attached)
		dma_sync_single_for_device(rk_dma_heap_get_dev(buffer->heap->heap),
					   buffer->phys + offset,
					   len,
					   direction);
	mutex_unlock(&buffer->lock);

	return 0;
}

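/* Full-buffer CPU access hooks, implemented on top of the partial variants */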
static int rk_cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
						enum dma_data_direction dir)
{
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;
	unsigned int len = buffer->pagecount * PAGE_SIZE;

	return rk_cma_heap_dma_buf_begin_cpu_access_partial(dmabuf, dir, 0, len);
}

static int rk_cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					      enum dma_data_direction dir)
{
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;
	unsigned int len = buffer->pagecount * PAGE_SIZE;

	return rk_cma_heap_dma_buf_end_cpu_access_partial(dmabuf, dir, 0, len);
}

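/*
 * The buffer is physically contiguous, so the whole VMA can be mapped with
 * a single remap_pfn_range() starting at the buffer's base PFN.
 */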
static int rk_cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;
	size_t size = vma->vm_end - vma->vm_start;
	int ret;

	ret = remap_pfn_range(vma, vma->vm_start, __phys_to_pfn(buffer->phys),
			      size, vma->vm_page_prot);
	if (ret)
		return -EAGAIN;

	return 0;
}

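/*
 * Kernel mappings are created with vmap() over the buffer's page array and
 * are reference counted: repeated vmap calls reuse the existing mapping
 * under buffer->lock.
 */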
static void *rk_cma_heap_do_vmap(struct rk_cma_heap_buffer *buffer)
{
	void *vaddr;
	pgprot_t pgprot = PAGE_KERNEL;

	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, pgprot);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static void *rk_cma_heap_vmap(struct dma_buf *dmabuf)
{
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		vaddr = buffer->vaddr;
		goto out;
	}

	vaddr = rk_cma_heap_do_vmap(buffer);
	if (IS_ERR(vaddr))
		goto out;

	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
out:
	mutex_unlock(&buffer->lock);

	return vaddr;
}

static void rk_cma_heap_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
}

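/*
 * Every exported dmabuf is tracked on the heap's dmabuf_list so allocations
 * and frees can be logged with the buffer name, inode number and physical
 * range.
 */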
static void rk_cma_heap_remove_dmabuf_list(struct dma_buf *dmabuf)
{
	struct rk_dma_heap_dmabuf *buf;
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;
	struct rk_cma_heap *cma_heap = buffer->heap;
	struct rk_dma_heap *heap = cma_heap->heap;

	mutex_lock(&heap->dmabuf_lock);
	list_for_each_entry(buf, &heap->dmabuf_list, node) {
		if (buf->dmabuf == dmabuf) {
			dma_heap_print("<%s> free dmabuf<ino-%ld>@[%pa-%pa] to heap-<%s>\n",
				       dmabuf->name,
				       dmabuf->file->f_inode->i_ino,
				       &buf->start, &buf->end,
				       rk_dma_heap_get_name(heap));
			list_del(&buf->node);
			kfree(buf);
			break;
		}
	}
	mutex_unlock(&heap->dmabuf_lock);
}

static int rk_cma_heap_add_dmabuf_list(struct dma_buf *dmabuf, const char *name)
{
	struct rk_dma_heap_dmabuf *buf;
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;
	struct rk_cma_heap *cma_heap = buffer->heap;
	struct rk_dma_heap *heap = cma_heap->heap;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&buf->node);
	buf->dmabuf = dmabuf;
	buf->start = buffer->phys;
	buf->end = buf->start + buffer->len - 1;
	mutex_lock(&heap->dmabuf_lock);
	list_add_tail(&buf->node, &heap->dmabuf_list);
	mutex_unlock(&heap->dmabuf_lock);

	dma_heap_print("<%s> alloc dmabuf<ino-%ld>@[%pa-%pa] from heap-<%s>\n",
		       dmabuf->name, dmabuf->file->f_inode->i_ino,
		       &buf->start, &buf->end, rk_dma_heap_get_name(heap));

	return 0;
}

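/*
 * Contiguous page allocations made through the alloc_contig_pages path (no
 * dmabuf involved) are tracked on a separate contig_list, tagged with the
 * caller-supplied name or, failing that, current->comm.
 */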
static int rk_cma_heap_remove_contig_list(struct rk_dma_heap *heap,
					  struct page *page, const char *name)
{
	struct rk_dma_heap_contig_buf *buf;

	mutex_lock(&heap->contig_lock);
	list_for_each_entry(buf, &heap->contig_list, node) {
		if (buf->start == page_to_phys(page)) {
			dma_heap_print("<%s> free contig-buf@[%pa-%pa] to heap-<%s>\n",
				       buf->orig_alloc, &buf->start, &buf->end,
				       rk_dma_heap_get_name(heap));
			list_del(&buf->node);
			kfree(buf->orig_alloc);
			kfree(buf);
			break;
		}
	}
	mutex_unlock(&heap->contig_lock);

	return 0;
}

static int rk_cma_heap_add_contig_list(struct rk_dma_heap *heap,
				       struct page *page, unsigned long size,
				       const char *name)
{
	struct rk_dma_heap_contig_buf *buf;
	const char *name_tmp;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&buf->node);
	if (!name)
		name_tmp = current->comm;
	else
		name_tmp = name;

	buf->orig_alloc = kstrndup(name_tmp, RK_DMA_HEAP_NAME_LEN, GFP_KERNEL);
	if (!buf->orig_alloc) {
		kfree(buf);
		return -ENOMEM;
	}

	buf->start = page_to_phys(page);
	buf->end = buf->start + size - 1;
	mutex_lock(&heap->contig_lock);
	list_add_tail(&buf->node, &heap->contig_list);
	mutex_unlock(&heap->contig_lock);

	dma_heap_print("<%s> alloc contig-buf@[%pa-%pa] from heap-<%s>\n",
		       buf->orig_alloc, &buf->start, &buf->end,
		       rk_dma_heap_get_name(heap));

	return 0;
}

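/*
 * Final dmabuf release: drop any stale kernel mapping, remove the buffer
 * from the heap's tracking list, return the pages to CMA and update the
 * heap's usage accounting.
 */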
static void rk_cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct rk_cma_heap_buffer *buffer = dmabuf->priv;
	struct rk_cma_heap *cma_heap = buffer->heap;
	struct rk_dma_heap *heap = cma_heap->heap;

	if (buffer->vmap_cnt > 0) {
		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
		vunmap(buffer->vaddr);
	}

	rk_cma_heap_remove_dmabuf_list(dmabuf);

	/* free page list */
	kfree(buffer->pages);
	/* release memory */
	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
	rk_dma_heap_total_dec(heap, buffer->len);

	kfree(buffer);
}

static const struct dma_buf_ops rk_cma_heap_buf_ops = {
	.cache_sgt_mapping = true,
	.attach = rk_cma_heap_attach,
	.detach = rk_cma_heap_detach,
	.map_dma_buf = rk_cma_heap_map_dma_buf,
	.unmap_dma_buf = rk_cma_heap_unmap_dma_buf,
	.begin_cpu_access = rk_cma_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = rk_cma_heap_dma_buf_end_cpu_access,
	.begin_cpu_access_partial = rk_cma_heap_dma_buf_begin_cpu_access_partial,
	.end_cpu_access_partial = rk_cma_heap_dma_buf_end_cpu_access_partial,
	.mmap = rk_cma_heap_mmap,
	.vmap = rk_cma_heap_vmap,
	.vunmap = rk_cma_heap_vunmap,
	.release = rk_cma_heap_dma_buf_release,
};

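/*
 * Main allocation path: reserve pagecount pages from the CMA area, zero
 * them, build the page array used by vmap/mmap, export the dmabuf and
 * record it on the heap's tracking list. On failure the partially
 * constructed state is unwound in reverse order.
 */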
static struct dma_buf *rk_cma_heap_allocate(struct rk_dma_heap *heap,
					    unsigned long len,
					    unsigned long fd_flags,
					    unsigned long heap_flags,
					    const char *name)
{
	struct rk_cma_heap *cma_heap = rk_dma_heap_get_drvdata(heap);
	struct rk_cma_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	size_t size = PAGE_ALIGN(len);
	pgoff_t pagecount = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	struct page *cma_pages;
	struct dma_buf *dmabuf;
	pgoff_t pg;
	int ret = -ENOMEM;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->len = size;

	if (align > CONFIG_DMABUF_HEAPS_ROCKCHIP_CMA_ALIGNMENT)
		align = CONFIG_DMABUF_HEAPS_ROCKCHIP_CMA_ALIGNMENT;

	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, GFP_KERNEL);
	if (!cma_pages)
		goto free_buffer;

	/* Clear the cma pages */
	if (PageHighMem(cma_pages)) {
		unsigned long nr_clear_pages = pagecount;
		struct page *page = cma_pages;

		while (nr_clear_pages > 0) {
			void *vaddr = kmap_atomic(page);

			memset(vaddr, 0, PAGE_SIZE);
			kunmap_atomic(vaddr);
			/*
			 * Avoid wasting time zeroing memory if the process
			 * has been killed by SIGKILL.
			 */
			if (fatal_signal_pending(current))
				goto free_cma;
			page++;
			nr_clear_pages--;
		}
	} else {
		memset(page_address(cma_pages), 0, size);
	}

	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages),
				      GFP_KERNEL);
	if (!buffer->pages) {
		ret = -ENOMEM;
		goto free_cma;
	}

	for (pg = 0; pg < pagecount; pg++)
		buffer->pages[pg] = &cma_pages[pg];

	buffer->cma_pages = cma_pages;
	buffer->heap = cma_heap;
	buffer->pagecount = pagecount;

	/* create the dmabuf */
	exp_info.exp_name = rk_dma_heap_get_name(heap);
	exp_info.ops = &rk_cma_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}

	buffer->phys = page_to_phys(cma_pages);
	dma_sync_single_for_cpu(rk_dma_heap_get_dev(heap), buffer->phys,
				buffer->pagecount * PAGE_SIZE,
				DMA_FROM_DEVICE);

	ret = rk_cma_heap_add_dmabuf_list(dmabuf, name);
	if (ret)
		goto fail_dma_buf;

	rk_dma_heap_total_inc(heap, buffer->len);

	return dmabuf;

fail_dma_buf:
	/*
	 * dma_buf_put() drops the last reference, so the release callback has
	 * already freed the page array, the CMA pages and the buffer; do not
	 * fall through to the labels below or they would be freed twice.
	 */
	dma_buf_put(dmabuf);
	return ERR_PTR(ret);
free_pages:
	kfree(buffer->pages);
free_cma:
	cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
	kfree(buffer);

	return ERR_PTR(ret);
}

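/*
 * alloc_contig_pages/free_contig_pages path: hand out raw CMA pages for
 * callers that need physically contiguous memory without a dmabuf wrapper.
 */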
static struct page *rk_cma_heap_allocate_pages(struct rk_dma_heap *heap,
					       size_t len, const char *name)
{
	struct rk_cma_heap *cma_heap = rk_dma_heap_get_drvdata(heap);
	size_t size = PAGE_ALIGN(len);
	pgoff_t pagecount = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	struct page *page;
	int ret;

	if (align > CONFIG_DMABUF_HEAPS_ROCKCHIP_CMA_ALIGNMENT)
		align = CONFIG_DMABUF_HEAPS_ROCKCHIP_CMA_ALIGNMENT;

	page = cma_alloc(cma_heap->cma, pagecount, align, GFP_KERNEL);
	if (!page)
		return ERR_PTR(-ENOMEM);

	ret = rk_cma_heap_add_contig_list(heap, page, size, name);
	if (ret) {
		cma_release(cma_heap->cma, page, pagecount);
		return ERR_PTR(-EINVAL);
	}

	rk_dma_heap_total_inc(heap, size);

	return page;
}

static void rk_cma_heap_free_pages(struct rk_dma_heap *heap,
				   struct page *page, size_t len,
				   const char *name)
{
	struct rk_cma_heap *cma_heap = rk_dma_heap_get_drvdata(heap);
	pgoff_t pagecount = len >> PAGE_SHIFT;

	rk_cma_heap_remove_contig_list(heap, page, name);

	cma_release(cma_heap->cma, page, pagecount);

	rk_dma_heap_total_dec(heap, len);
}

static const struct rk_dma_heap_ops rk_cma_heap_ops = {
	.allocate = rk_cma_heap_allocate,
	.alloc_contig_pages = rk_cma_heap_allocate_pages,
	.free_contig_pages = rk_cma_heap_free_pages,
};

static int cma_procfs_show(struct seq_file *s, void *private);

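/*
 * Register one rk_dma_heap on top of the given CMA area and, when the heap
 * exposes a procfs directory, publish an "alloc_bitmap" entry that dumps
 * the CMA allocation bitmap.
 */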
static int __rk_add_cma_heap(struct cma *cma, void *data)
{
	struct rk_cma_heap *cma_heap;
	struct rk_dma_heap_export_info exp_info;

	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
	if (!cma_heap)
		return -ENOMEM;
	cma_heap->cma = cma;

	exp_info.name = cma_get_name(cma);
	exp_info.ops = &rk_cma_heap_ops;
	exp_info.priv = cma_heap;
	exp_info.support_cma = true;

	cma_heap->heap = rk_dma_heap_add(&exp_info);
	if (IS_ERR(cma_heap->heap)) {
		int ret = PTR_ERR(cma_heap->heap);

		kfree(cma_heap);
		return ret;
	}

	if (cma_heap->heap->procfs)
		proc_create_single_data("alloc_bitmap", 0, cma_heap->heap->procfs,
					cma_procfs_show, cma);

	return 0;
}

static int __init rk_add_default_cma_heap(void)
{
	struct cma *cma = rk_dma_heap_get_cma();

	if (WARN_ON(!cma))
		return -EINVAL;

	return __rk_add_cma_heap(cma, NULL);
}

#if defined(CONFIG_VIDEO_ROCKCHIP_THUNDER_BOOT_ISP) && !defined(CONFIG_INITCALL_ASYNC)
subsys_initcall(rk_add_default_cma_heap);
#else
module_init(rk_add_default_cma_heap);
#endif

static void cma_procfs_format_array(char *buf, size_t bufsize, u32 *array, int array_size)
{
	int i = 0;

	while (--array_size >= 0) {
		size_t len;
		char term = (array_size && (++i % 8)) ? ' ' : '\n';

		len = snprintf(buf, bufsize, "%08X%c", *array++, term);
		buf += len;
		bufsize -= len;
	}
}

static void cma_procfs_show_bitmap(struct seq_file *s, struct cma *cma)
{
	int elements = DIV_ROUND_UP(cma_bitmap_maxno(cma), BITS_PER_BYTE * sizeof(u32));
	int size = elements * 9;
	u32 *array = (u32 *)cma->bitmap;
	char *buf;

	buf = kmalloc(size + 1, GFP_KERNEL);
	if (!buf)
		return;

	buf[size] = 0;

	cma_procfs_format_array(buf, size + 1, array, elements);
	seq_printf(s, "%s", buf);
	kfree(buf);
}

static u64 cma_procfs_used_get(struct cma *cma)
{
	unsigned long used;

	mutex_lock(&cma->lock);
	used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
	mutex_unlock(&cma->lock);

	return (u64)used << cma->order_per_bit;
}

static int cma_procfs_show(struct seq_file *s, void *private)
{
	struct cma *cma = s->private;
	u64 used = cma_procfs_used_get(cma);

	seq_printf(s, "Total: %lu KiB\n", cma->count << (PAGE_SHIFT - 10));
	seq_printf(s, " Used: %llu KiB\n\n", used << (PAGE_SHIFT - 10));

	cma_procfs_show_bitmap(s, cma);

	return 0;
}

MODULE_DESCRIPTION("RockChip DMA-BUF CMA Heap");
MODULE_LICENSE("GPL v2");