// SPDX-License-Identifier: GPL-2.0
/*
 * SRAM DMA-BUF heap exporter, with support for allocating pages and
 * dma-bufs directly from kernel space
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *
 * Author: Andrew F. Davis <afd@ti.com>
 *
 * Copyright (C) 2022 Rockchip Electronics Co., Ltd.
 *
 * Author: Huang Lee <Putin.li@rock-chips.com>
 */
#define pr_fmt(fmt) "sram_heap: " fmt

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/module.h>

#include <linux/sram_heap.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

#define RK3588_SRAM_BASE 0xff001000

struct sram_dma_heap {
	struct dma_heap *heap;
	struct gen_pool *pool;
};

struct sram_dma_heap_buffer {
	struct gen_pool *pool;
	struct list_head attachments;
	struct mutex attachments_lock;
	unsigned long len;
	void *vaddr;
	phys_addr_t paddr;
};

struct dma_heap_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
};

static int dma_heap_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attachment)
{
	struct sram_dma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	struct sg_table *table;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		goto table_alloc_failed;

	if (sg_alloc_table(table, 1, GFP_KERNEL))
		goto sg_alloc_failed;

	/*
	 * The pfn and page referenced here exist only to record the SRAM
	 * address in the sgtable; they do not describe real system RAM and
	 * must not be accessed directly or indirectly, nor used for any
	 * other purpose (in particular, the page does not support kmap()).
	 *
	 * Behaviour on 32-bit systems has not been verified.
	 */
	sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(buffer->paddr)), buffer->len, 0);

	a->table = table;
	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);

	attachment->priv = a;

	mutex_lock(&buffer->attachments_lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->attachments_lock);

	return 0;

sg_alloc_failed:
	kfree(table);
table_alloc_failed:
	kfree(a);
	return -ENOMEM;
}

static void dma_heap_detach(struct dma_buf *dmabuf,
			    struct dma_buf_attachment *attachment)
{
	struct sram_dma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->attachments_lock);
	list_del(&a->list);
	mutex_unlock(&buffer->attachments_lock);

	sg_free_table(a->table);
	kfree(a->table);
	kfree(a);
}

static struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, DMA_ATTR_SKIP_CPU_SYNC);
	if (ret)
		return ERR_PTR(-ENOMEM);

	return table;
}

static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction direction)
{
	dma_unmap_sgtable(attachment->dev, table, direction, DMA_ATTR_SKIP_CPU_SYNC);
}

static void dma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct sram_dma_heap_buffer *buffer = dmabuf->priv;

	gen_pool_free(buffer->pool, (unsigned long)buffer->vaddr, buffer->len);
	kfree(buffer);
}

static int dma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct sram_dma_heap_buffer *buffer = dmabuf->priv;
	int ret;

	/* SRAM mappings are not cached */
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	ret = vm_iomap_memory(vma, buffer->paddr, buffer->len);
	if (ret)
		pr_err("Could not map buffer to userspace\n");

	return ret;
}

static void *dma_heap_vmap(struct dma_buf *dmabuf)
{
	struct sram_dma_heap_buffer *buffer = dmabuf->priv;

	return buffer->vaddr;
}

static const struct dma_buf_ops sram_dma_heap_buf_ops = {
	.attach = dma_heap_attach,
	.detach = dma_heap_detach,
	.map_dma_buf = dma_heap_map_dma_buf,
	.unmap_dma_buf = dma_heap_unmap_dma_buf,
	.release = dma_heap_dma_buf_release,
	.mmap = dma_heap_mmap,
	.vmap = dma_heap_vmap,
};

static struct dma_buf *sram_dma_heap_allocate(struct dma_heap *heap,
					      unsigned long len,
					      unsigned long fd_flags,
					      unsigned long heap_flags)
{
	struct sram_dma_heap *sram_dma_heap = dma_heap_get_drvdata(heap);
	struct sram_dma_heap_buffer *buffer;

	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;
	int ret = -ENOMEM;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);
	buffer->pool = sram_dma_heap->pool;
	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->attachments_lock);
	buffer->len = len;

	buffer->vaddr = (void *)gen_pool_alloc(buffer->pool, buffer->len);
	if (!buffer->vaddr) {
		ret = -ENOMEM;
		goto free_buffer;
	}

	buffer->paddr = gen_pool_virt_to_phys(buffer->pool, (unsigned long)buffer->vaddr);
	if (buffer->paddr == -1) {
		ret = -ENOMEM;
		goto free_pool;
	}

	/* create the dmabuf */
	exp_info.ops = &sram_dma_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pool;
	}

	return dmabuf;

free_pool:
	gen_pool_free(buffer->pool, (unsigned long)buffer->vaddr, buffer->len);
free_buffer:
	kfree(buffer);

	return ERR_PTR(ret);
}

static const struct dma_heap_ops sram_dma_heap_ops = {
	.allocate = sram_dma_heap_allocate,
};

static struct sram_dma_heap *sram_dma_heap_global;

static int sram_dma_heap_export(const char *name,
				struct gen_pool *sram_gp)
{
	struct sram_dma_heap *sram_dma_heap;
	struct dma_heap_export_info exp_info;

	pr_info("Exporting SRAM pool '%s'\n", name);

	sram_dma_heap = kzalloc(sizeof(*sram_dma_heap), GFP_KERNEL);
	if (!sram_dma_heap)
		return -ENOMEM;
	sram_dma_heap->pool = sram_gp;

	exp_info.name = "sram_dma_heap";
	exp_info.ops = &sram_dma_heap_ops;
	exp_info.priv = sram_dma_heap;

	sram_dma_heap->heap = dma_heap_add(&exp_info);
	if (IS_ERR(sram_dma_heap->heap)) {
		int ret = PTR_ERR(sram_dma_heap->heap);

		kfree(sram_dma_heap);
		return ret;
	}

	/* Publish the heap only once registration has succeeded */
	sram_dma_heap_global = sram_dma_heap;

	return 0;
}

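/*
 * Illustrative userspace sketch (not part of this driver): once the heap
 * is registered it can be allocated from through the standard dma-heap
 * ioctl interface. This assumes the heap appears as
 * /dev/dma_heap/sram_dma_heap, matching exp_info.name above;
 * sram_heap_alloc_fd() is a hypothetical helper name.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/dma-heap.h>
 *
 *	int sram_heap_alloc_fd(size_t len)
 *	{
 *		struct dma_heap_allocation_data data = {
 *			.len = len,
 *			.fd_flags = O_RDWR | O_CLOEXEC,
 *		};
 *		int heap_fd = open("/dev/dma_heap/sram_dma_heap", O_RDONLY);
 *		int ret;
 *
 *		if (heap_fd < 0)
 *			return -1;
 *		ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
 *		close(heap_fd);
 *		return ret < 0 ? -1 : (int)data.fd; // dma-buf fd, mmap()-able
 *	}
 */
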
struct dma_buf *sram_heap_alloc_dma_buf(size_t size)
{
	struct sram_dma_heap *sram_dma_heap = sram_dma_heap_global;
	struct sram_dma_heap_buffer *buffer;

	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;
	int ret = -ENOMEM;

	/* The heap may not have been initialised (e.g. missing DT node) */
	if (!sram_dma_heap)
		return ERR_PTR(-ENODEV);

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->pool = sram_dma_heap->pool;
	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->attachments_lock);
	buffer->len = size;

	buffer->vaddr = (void *)gen_pool_alloc(buffer->pool, buffer->len);
	if (!buffer->vaddr) {
		ret = -ENOMEM;
		goto free_buffer;
	}

	buffer->paddr = gen_pool_virt_to_phys(buffer->pool, (unsigned long)buffer->vaddr);
	if (buffer->paddr == -1) {
		ret = -ENOMEM;
		goto free_pool;
	}

	/* create the dmabuf */
	exp_info.ops = &sram_dma_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pool;
	}

	return dmabuf;

free_pool:
	gen_pool_free(buffer->pool, (unsigned long)buffer->vaddr, buffer->len);
free_buffer:
	kfree(buffer);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(sram_heap_alloc_dma_buf);

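/*
 * Minimal in-kernel usage sketch for the exported helper above, assuming
 * the heap was initialised at module_init time (hypothetical caller):
 *
 *	struct dma_buf *buf = sram_heap_alloc_dma_buf(SZ_4K);
 *
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	// ... share 'buf' with a device via dma_buf_attach() /
 *	//     dma_buf_map_attachment(), or query it with the accessors below ...
 *	sram_heap_free_dma_buf(buf);
 */
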
struct page *sram_heap_alloc_pages(size_t size)
{
	struct sram_dma_heap *sram_dma_heap = sram_dma_heap_global;

	void *vaddr;
	phys_addr_t paddr;
	struct page *p;

	int ret = -ENOMEM;

	/* The heap may not have been initialised (e.g. missing DT node) */
	if (!sram_dma_heap)
		return ERR_PTR(-ENODEV);

	vaddr = (void *)gen_pool_alloc(sram_dma_heap->pool, size);
	if (!vaddr) {
		ret = -ENOMEM;
		pr_err("no memory\n");
		goto failed;
	}

	paddr = gen_pool_virt_to_phys(sram_dma_heap->pool, (unsigned long)vaddr);
	if (paddr == -1) {
		ret = -ENOMEM;
		pr_err("gen_pool_virt_to_phys failed\n");
		goto free_pool;
	}

	p = pfn_to_page(PFN_DOWN(paddr));

	return p;

free_pool:
	gen_pool_free(sram_dma_heap->pool, (unsigned long)vaddr, size);
failed:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(sram_heap_alloc_pages);

static u64 gen_pool_phys_to_virt(struct gen_pool *pool, phys_addr_t paddr)
{
	struct gen_pool_chunk *chunk;
	u64 vaddr = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		/* Reverse of gen_pool_virt_to_phys(): match paddr to its chunk */
		if (paddr >= chunk->phys_addr &&
		    paddr - chunk->phys_addr <= chunk->end_addr - chunk->start_addr) {
			vaddr = chunk->start_addr + (paddr - chunk->phys_addr);
			break;
		}
	}
	rcu_read_unlock();

	return vaddr;
}

void sram_heap_free_pages(struct page *p)
{
	struct sram_dma_heap *sram_dma_heap = sram_dma_heap_global;
	void *vaddr;

	vaddr = (void *)gen_pool_phys_to_virt(sram_dma_heap->pool, page_to_phys(p));

	/* NOTE: only a single PAGE_SIZE allocation can be returned here */
	gen_pool_free(sram_dma_heap->pool, (unsigned long)vaddr, PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(sram_heap_free_pages);

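/*
 * Usage sketch for the page-based API, assuming a single-page allocation.
 * Since sram_heap_free_pages() above always returns exactly PAGE_SIZE to
 * the pool, allocations larger than PAGE_SIZE must not be freed through it:
 *
 *	struct page *p = sram_heap_alloc_pages(PAGE_SIZE);
 *
 *	if (IS_ERR(p))
 *		return PTR_ERR(p);
 *	// ... record page_to_phys(p) in a device descriptor; do not kmap() ...
 *	sram_heap_free_pages(p);
 */
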
void sram_heap_free_dma_buf(struct dma_buf *dmabuf)
{
	struct sram_dma_heap_buffer *buffer = dmabuf->priv;

	gen_pool_free(buffer->pool, (unsigned long)buffer->vaddr, buffer->len);
	kfree(buffer);
}
EXPORT_SYMBOL_GPL(sram_heap_free_dma_buf);

void *sram_heap_get_vaddr(struct dma_buf *dmabuf)
{
	struct sram_dma_heap_buffer *buffer = dmabuf->priv;

	return buffer->vaddr;
}
EXPORT_SYMBOL_GPL(sram_heap_get_vaddr);

phys_addr_t sram_heap_get_paddr(struct dma_buf *dmabuf)
{
	struct sram_dma_heap_buffer *buffer = dmabuf->priv;

	return buffer->paddr;
}
EXPORT_SYMBOL_GPL(sram_heap_get_paddr);

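/*
 * Accessor usage sketch: the CPU-visible mapping comes from the gen_pool
 * virtual address, while the physical address can be programmed into a
 * device. 'dev_regs' and 'BUF_ADDR' below are hypothetical names used
 * purely for illustration:
 *
 *	void *va = sram_heap_get_vaddr(buf);
 *	phys_addr_t pa = sram_heap_get_paddr(buf);
 *
 *	memset_io((void __iomem *)va, 0, SZ_4K);	// CPU access via pool mapping
 *	writel(lower_32_bits(pa), dev_regs + BUF_ADDR);	// hypothetical register
 */
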
static int rk_add_default_sram_heap(void)
{
	struct device_node *np = NULL;
	struct gen_pool *sram_gp = NULL;
	int ret = 0;

	np = of_find_compatible_node(NULL, NULL, "rockchip,sram-heap");
	if (!np) {
		pr_info("failed to get device node of sram-heap\n");
		return -ENODEV;
	}

	if (!of_device_is_available(np)) {
		of_node_put(np);
		return ret;
	}

	sram_gp = of_gen_pool_get(np, "rockchip,sram", 0);
	/* release node */
	of_node_put(np);
	if (!sram_gp) {
		pr_err("sram gen pool is NULL\n");
		return -ENOMEM;
	}

	ret = sram_dma_heap_export("sram-heap", sram_gp);

	return ret;
}
module_init(rk_add_default_sram_heap);
MODULE_DESCRIPTION("Rockchip DMA-BUF SRAM Heap");
MODULE_LICENSE("GPL");