xref: /OK3568_Linux_fs/kernel/drivers/dma-buf/heaps/sram_heap.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * SRAM DMA-Heap exporter, with support for allocating pages and
 * dma-bufs from kernel space
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *
 * Author: Andrew F. Davis <afd@ti.com>
 *
 * Copyright (C) 2022 Rockchip Electronics Co., Ltd.
 *
 * Author: Huang Lee <Putin.li@rock-chips.com>
 */
#define pr_fmt(fmt) "sram_heap: " fmt

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/module.h>

#include <linux/sram_heap.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

#define RK3588_SRAM_BASE 0xff001000

struct sram_dma_heap {
	struct dma_heap *heap;
	struct gen_pool *pool;
};

struct sram_dma_heap_buffer {
	struct gen_pool *pool;
	struct list_head attachments;
	struct mutex attachments_lock;
	unsigned long len;
	void *vaddr;
	phys_addr_t paddr;
};

struct dma_heap_attachment {
	struct device *dev;
	struct sg_table *table;
	struct list_head list;
};

static int dma_heap_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attachment)
{
	struct sram_dma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	struct sg_table *table;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		goto table_alloc_failed;

	if (sg_alloc_table(table, 1, GFP_KERNEL))
		goto sg_alloc_failed;

	/*
	 * The referenced pfn and page exist only to carry the SRAM address
	 * in the sgtable; they must not be used for any other purpose or be
	 * accessed directly or indirectly.
	 *
	 * Behaviour on 32-bit systems has not been verified.
	 *
	 * The page cannot be used with kmap().
	 */
	sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(buffer->paddr)), buffer->len, 0);

	a->table = table;
	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);

	attachment->priv = a;

	mutex_lock(&buffer->attachments_lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->attachments_lock);

	return 0;

sg_alloc_failed:
	kfree(table);
table_alloc_failed:
	kfree(a);
	return -ENOMEM;
}

static void dma_heap_detach(struct dma_buf *dmabuf,
			    struct dma_buf_attachment *attachment)
{
	struct sram_dma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->attachments_lock);
	list_del(&a->list);
	mutex_unlock(&buffer->attachments_lock);

	sg_free_table(a->table);
	kfree(a->table);
	kfree(a);
}

static struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, DMA_ATTR_SKIP_CPU_SYNC);
	if (ret)
		return ERR_PTR(-ENOMEM);

	return table;
}

static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction direction)
{
	dma_unmap_sgtable(attachment->dev, table, direction, DMA_ATTR_SKIP_CPU_SYNC);
}

static void dma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct sram_dma_heap_buffer *buffer = dmabuf->priv;

	gen_pool_free(buffer->pool, (unsigned long)buffer->vaddr, buffer->len);
	kfree(buffer);
}

static int dma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct sram_dma_heap_buffer *buffer = dmabuf->priv;
	int ret;

	/* SRAM mappings are not cached */
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	ret = vm_iomap_memory(vma, buffer->paddr, buffer->len);
	if (ret)
		pr_err("Could not map buffer to userspace\n");

	return ret;
}

static void *dma_heap_vmap(struct dma_buf *dmabuf)
{
	struct sram_dma_heap_buffer *buffer = dmabuf->priv;

	return buffer->vaddr;
}

static const struct dma_buf_ops sram_dma_heap_buf_ops = {
	.attach = dma_heap_attach,
	.detach = dma_heap_detach,
	.map_dma_buf = dma_heap_map_dma_buf,
	.unmap_dma_buf = dma_heap_unmap_dma_buf,
	.release = dma_heap_dma_buf_release,
	.mmap = dma_heap_mmap,
	.vmap = dma_heap_vmap,
};

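/*
 * Userspace reaches this heap through the standard dma-heap uapi from
 * <linux/dma-heap.h>. A minimal sketch, error handling elided (the heap
 * name "sram_dma_heap" comes from the exp_info.name set below):
 *
 *	struct dma_heap_allocation_data data = {
 *		.len = 4096,
 *		.fd_flags = O_RDWR | O_CLOEXEC,
 *	};
 *	int heap_fd = open("/dev/dma_heap/sram_dma_heap", O_RDWR);
 *
 *	ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
 *	// mmap() lands in dma_heap_mmap() above, so the mapping is
 *	// write-combined rather than cached.
 *	void *p = mmap(NULL, data.len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, data.fd, 0);
 */
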
static struct dma_buf *sram_dma_heap_allocate(struct dma_heap *heap,
				unsigned long len,
				unsigned long fd_flags,
				unsigned long heap_flags)
{
	struct sram_dma_heap *sram_dma_heap = dma_heap_get_drvdata(heap);
	struct sram_dma_heap_buffer *buffer;

	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;
	int ret = -ENOMEM;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);
	buffer->pool = sram_dma_heap->pool;
	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->attachments_lock);
	buffer->len = len;

	buffer->vaddr = (void *)gen_pool_alloc(buffer->pool, buffer->len);
	if (!buffer->vaddr) {
		ret = -ENOMEM;
		goto free_buffer;
	}

	buffer->paddr = gen_pool_virt_to_phys(buffer->pool, (unsigned long)buffer->vaddr);
	if (buffer->paddr == -1) {
		ret = -ENOMEM;
		goto free_pool;
	}

	/* create the dmabuf */
	exp_info.ops = &sram_dma_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pool;
	}

	return dmabuf;

free_pool:
	gen_pool_free(buffer->pool, (unsigned long)buffer->vaddr, buffer->len);
free_buffer:
	kfree(buffer);

	return ERR_PTR(ret);
}

static struct dma_heap_ops sram_dma_heap_ops = {
	.allocate = sram_dma_heap_allocate,
};

static struct sram_dma_heap *sram_dma_heap_global;

static int sram_dma_heap_export(const char *name,
			 struct gen_pool *sram_gp)
{
	struct sram_dma_heap *sram_dma_heap;
	struct dma_heap_export_info exp_info;

	pr_info("Exporting SRAM pool '%s'\n", name);

	sram_dma_heap = kzalloc(sizeof(*sram_dma_heap), GFP_KERNEL);
	if (!sram_dma_heap)
		return -ENOMEM;
	sram_dma_heap->pool = sram_gp;

	exp_info.name = "sram_dma_heap";
	exp_info.ops = &sram_dma_heap_ops;
	exp_info.priv = sram_dma_heap;

	sram_dma_heap->heap = dma_heap_add(&exp_info);
	if (IS_ERR(sram_dma_heap->heap)) {
		int ret = PTR_ERR(sram_dma_heap->heap);

		kfree(sram_dma_heap);
		return ret;
	}

	/*
	 * Publish the heap for the kernel-internal helpers below only once
	 * registration has succeeded, so the global never points at a freed
	 * heap.
	 */
	sram_dma_heap_global = sram_dma_heap;

	return 0;
}

struct dma_buf *sram_heap_alloc_dma_buf(size_t size)
{
	struct sram_dma_heap *sram_dma_heap = sram_dma_heap_global;
	struct sram_dma_heap_buffer *buffer;

	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;
	int ret = -ENOMEM;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->pool = sram_dma_heap->pool;
	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->attachments_lock);
	buffer->len = size;

	buffer->vaddr = (void *)gen_pool_alloc(buffer->pool, buffer->len);
	if (!buffer->vaddr) {
		ret = -ENOMEM;
		goto free_buffer;
	}

	buffer->paddr = gen_pool_virt_to_phys(buffer->pool, (unsigned long)buffer->vaddr);
	if (buffer->paddr == -1) {
		ret = -ENOMEM;
		goto free_pool;
	}

	/* create the dmabuf */
	exp_info.ops = &sram_dma_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pool;
	}

	return dmabuf;

free_pool:
	gen_pool_free(buffer->pool, (unsigned long)buffer->vaddr, buffer->len);
free_buffer:
	kfree(buffer);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(sram_heap_alloc_dma_buf);

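/*
 * Kernel users can allocate from the heap directly. A minimal sketch,
 * error handling elided (the size is illustrative; the helpers used here
 * are the exported functions in this file):
 *
 *	struct dma_buf *buf = sram_heap_alloc_dma_buf(SZ_4K);
 *
 *	if (!IS_ERR(buf)) {
 *		void *va = sram_heap_get_vaddr(buf);
 *		phys_addr_t pa = sram_heap_get_paddr(buf);
 *
 *		// ... hand pa to a device, access the SRAM through va ...
 *		sram_heap_free_dma_buf(buf);
 *	}
 */
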
struct page *sram_heap_alloc_pages(size_t size)
{
	struct sram_dma_heap *sram_dma_heap = sram_dma_heap_global;

	void *vaddr;
	phys_addr_t paddr;
	struct page *p;

	int ret = -ENOMEM;

	vaddr = (void *)gen_pool_alloc(sram_dma_heap->pool, size);
	if (!vaddr) {
		ret = -ENOMEM;
		pr_err("no memory\n");
		goto failed;
	}

	paddr = gen_pool_virt_to_phys(sram_dma_heap->pool, (unsigned long)vaddr);
	if (paddr == -1) {
		ret = -ENOMEM;
		pr_err("gen_pool_virt_to_phys failed\n");
		goto free_pool;
	}

	p = pfn_to_page(PFN_DOWN(paddr));

	return p;

free_pool:
	gen_pool_free(sram_dma_heap->pool, (unsigned long)vaddr, size);
failed:

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(sram_heap_alloc_pages);

static u64 gen_pool_phys_to_virt(struct gen_pool *pool, phys_addr_t paddr)
{
	struct gen_pool_chunk *chunk;
	u64 vaddr = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		/* TODO: only correct for a pool made of a single chunk */
		vaddr = chunk->start_addr + (paddr - chunk->phys_addr);
	}
	rcu_read_unlock();

	return vaddr;
}

void sram_heap_free_pages(struct page *p)
{
	struct sram_dma_heap *sram_dma_heap = sram_dma_heap_global;
	void *vaddr;

	vaddr = (void *)gen_pool_phys_to_virt(sram_dma_heap->pool, page_to_phys(p));

	/*
	 * Always returns exactly PAGE_SIZE to the pool, so this only pairs
	 * correctly with sram_heap_alloc_pages(PAGE_SIZE) allocations.
	 */
	gen_pool_free(sram_dma_heap->pool, (unsigned long)vaddr, PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(sram_heap_free_pages);

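/*
 * Page-based usage, a minimal sketch. Since the free path above always
 * returns PAGE_SIZE to the pool, allocate exactly one page:
 *
 *	struct page *pg = sram_heap_alloc_pages(PAGE_SIZE);
 *
 *	if (!IS_ERR(pg)) {
 *		phys_addr_t pa = page_to_phys(pg);
 *
 *		// ... program a device with pa ...
 *		sram_heap_free_pages(pg);
 *	}
 */
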
void sram_heap_free_dma_buf(struct dma_buf *dmabuf)
{
	struct sram_dma_heap_buffer *buffer = dmabuf->priv;

	gen_pool_free(buffer->pool, (unsigned long)buffer->vaddr, buffer->len);
	kfree(buffer);
}
EXPORT_SYMBOL_GPL(sram_heap_free_dma_buf);

void *sram_heap_get_vaddr(struct dma_buf *dmabuf)
{
	struct sram_dma_heap_buffer *buffer = dmabuf->priv;

	return buffer->vaddr;
}
EXPORT_SYMBOL_GPL(sram_heap_get_vaddr);

phys_addr_t sram_heap_get_paddr(struct dma_buf *dmabuf)
{
	struct sram_dma_heap_buffer *buffer = dmabuf->priv;

	return buffer->paddr;
}
EXPORT_SYMBOL_GPL(sram_heap_get_paddr);

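/*
 * The default heap is bound through devicetree. A hypothetical sketch:
 * only the "rockchip,sram-heap" compatible and the "rockchip,sram"
 * phandle property are taken from the probe below; the SRAM node layout
 * is board-specific and shown purely for illustration, reusing the
 * RK3588_SRAM_BASE address defined above:
 *
 *	sram: sram@ff001000 {
 *		compatible = "mmio-sram";
 *		// reg, ranges and pool layout are board-specific
 *	};
 *
 *	sram-heap {
 *		compatible = "rockchip,sram-heap";
 *		rockchip,sram = <&sram>;
 *	};
 */
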
static int rk_add_default_sram_heap(void)
{
	struct device_node *np = NULL;
	struct gen_pool *sram_gp = NULL;
	int ret = 0;

	np = of_find_compatible_node(NULL, NULL, "rockchip,sram-heap");
	if (!np) {
		pr_info("failed to get device node of sram-heap\n");
		return -ENODEV;
	}

	if (!of_device_is_available(np)) {
		of_node_put(np);
		return ret;
	}

	sram_gp = of_gen_pool_get(np, "rockchip,sram", 0);
	/* release node */
	of_node_put(np);
	if (sram_gp == NULL) {
		pr_err("sram gen pool is NULL\n");
		return -ENOMEM;
	}

	ret = sram_dma_heap_export("sram-heap", sram_gp);

	return ret;
}
module_init(rk_add_default_sram_heap);
MODULE_DESCRIPTION("Rockchip DMA-BUF SRAM Heap");
MODULE_LICENSE("GPL");