xref: /OK3568_Linux_fs/kernel/drivers/dma-buf/heaps/rk_system_heap.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * DMABUF System heap exporter for Rockchip
4  *
5  * Copyright (C) 2011 Google, Inc.
6  * Copyright (C) 2019, 2020 Linaro Ltd.
7  * Copyright (c) 2021, 2022 Rockchip Electronics Co. Ltd.
8  *
9  * Portions based off of Andrew Davis' SRAM heap:
10  * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
11  *	Andrew F. Davis <afd@ti.com>
12  */
13 
14 #include <linux/dma-buf.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/dma-heap.h>
17 #include <linux/err.h>
18 #include <linux/highmem.h>
19 #include <linux/mm.h>
20 #include <linux/module.h>
21 #include <linux/scatterlist.h>
22 #include <linux/slab.h>
23 #include <linux/swiotlb.h>
24 #include <linux/vmalloc.h>
25 #include <linux/rockchip/rockchip_sip.h>
26 
27 #include "page_pool.h"
28 #include "deferred-free-helper.h"
29 
30 static struct dma_heap *sys_heap;
31 static struct dma_heap *sys_dma32_heap;
32 static struct dma_heap *sys_uncached_heap;
33 static struct dma_heap *sys_uncached_dma32_heap;
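/*
 * These heaps are exposed to userspace as /dev/dma_heap/system,
 * /dev/dma_heap/system-dma32, /dev/dma_heap/system-uncached and
 * /dev/dma_heap/system-uncached-dma32. A minimal allocation sketch,
 * assuming the standard dma-heap uapi (<linux/dma-heap.h>):
 *
 *	int heap_fd = open("/dev/dma_heap/system", O_RDWR | O_CLOEXEC);
 *	struct dma_heap_allocation_data alloc = {
 *		.len = 4 * 1024 * 1024,
 *		.fd_flags = O_RDWR | O_CLOEXEC,
 *	};
 *	ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &alloc);
 *	void *p = mmap(NULL, alloc.len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       alloc.fd, 0);	// alloc.fd is the exported dma-buf
 */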
34 
35 /* Default DRAM bank-bit layout; overridden at init via sip_smc_get_dram_map() */
36 static u32 bank_bit_first = 12;
37 static u32 bank_bit_mask = 0x7;
38 
39 struct system_heap_buffer {
40 	struct dma_heap *heap;
41 	struct list_head attachments;
42 	struct mutex lock;
43 	unsigned long len;
44 	struct sg_table sg_table;
45 	int vmap_cnt;
46 	void *vaddr;
47 	struct deferred_freelist_item deferred_free;
48 	struct dmabuf_page_pool **pools;
49 	bool uncached;
50 };
51 
52 struct dma_heap_attachment {
53 	struct device *dev;
54 	struct sg_table *table;
55 	struct list_head list;
56 	bool mapped;
57 
58 	bool uncached;
59 };
60 
61 #define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO)
62 #define HIGH_ORDER_GFP  (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
63 				| __GFP_NORETRY) & ~__GFP_RECLAIM) \
64 				| __GFP_COMP)
65 static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
66 /*
67  * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
68  * to match with the sizes often found in IOMMUs. Using order 4 pages instead
69  * of order 0 pages can significantly improve the performance of many IOMMUs
70  * by reducing TLB pressure and time spent updating page tables.
71  */
72 static unsigned int orders[] = {8, 4, 0};
73 #define NUM_ORDERS ARRAY_SIZE(orders)
74 struct dmabuf_page_pool *pools[NUM_ORDERS];
75 struct dmabuf_page_pool *dma32_pools[NUM_ORDERS];
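/*
 * One recycling pool per order. The dma32_pools variants are created with
 * GFP_DMA32 and back the "system-dma32" and "system-uncached-dma32" heaps.
 */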
76 
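/*
 * Give each attachment its own copy of the buffer's scatter list so it can
 * be mapped and unmapped independently of the other attached devices.
 */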
77 static struct sg_table *dup_sg_table(struct sg_table *table)
78 {
79 	struct sg_table *new_table;
80 	int ret, i;
81 	struct scatterlist *sg, *new_sg;
82 
83 	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
84 	if (!new_table)
85 		return ERR_PTR(-ENOMEM);
86 
87 	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
88 	if (ret) {
89 		kfree(new_table);
90 		return ERR_PTR(-ENOMEM);
91 	}
92 
93 	new_sg = new_table->sgl;
94 	for_each_sgtable_sg(table, sg, i) {
95 		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
96 		new_sg = sg_next(new_sg);
97 	}
98 
99 	return new_table;
100 }
101 
102 static int system_heap_attach(struct dma_buf *dmabuf,
103 			      struct dma_buf_attachment *attachment)
104 {
105 	struct system_heap_buffer *buffer = dmabuf->priv;
106 	struct dma_heap_attachment *a;
107 	struct sg_table *table;
108 
109 	a = kzalloc(sizeof(*a), GFP_KERNEL);
110 	if (!a)
111 		return -ENOMEM;
112 
113 	table = dup_sg_table(&buffer->sg_table);
114 	if (IS_ERR(table)) {
115 		kfree(a);
116 		return -ENOMEM;
117 	}
118 
119 	a->table = table;
120 	a->dev = attachment->dev;
121 	INIT_LIST_HEAD(&a->list);
122 	a->mapped = false;
123 	a->uncached = buffer->uncached;
124 	attachment->priv = a;
125 
126 	mutex_lock(&buffer->lock);
127 	list_add(&a->list, &buffer->attachments);
128 	mutex_unlock(&buffer->lock);
129 
130 	return 0;
131 }
132 
133 static void system_heap_detach(struct dma_buf *dmabuf,
134 			       struct dma_buf_attachment *attachment)
135 {
136 	struct system_heap_buffer *buffer = dmabuf->priv;
137 	struct dma_heap_attachment *a = attachment->priv;
138 
139 	mutex_lock(&buffer->lock);
140 	list_del(&a->list);
141 	mutex_unlock(&buffer->lock);
142 
143 	sg_free_table(a->table);
144 	kfree(a->table);
145 	kfree(a);
146 }
147 
148 static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
149 						enum dma_data_direction direction)
150 {
151 	struct dma_heap_attachment *a = attachment->priv;
152 	struct sg_table *table = a->table;
153 	int attr = attachment->dma_map_attrs;
154 	int ret;
155 
156 	if (a->uncached)
157 		attr |= DMA_ATTR_SKIP_CPU_SYNC;
158 
159 	ret = dma_map_sgtable(attachment->dev, table, direction, attr);
160 	if (ret)
161 		return ERR_PTR(ret);
162 
163 	a->mapped = true;
164 	return table;
165 }
166 
167 static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
168 				      struct sg_table *table,
169 				      enum dma_data_direction direction)
170 {
171 	struct dma_heap_attachment *a = attachment->priv;
172 	int attr = attachment->dma_map_attrs;
173 
174 	if (a->uncached)
175 		attr |= DMA_ATTR_SKIP_CPU_SYNC;
176 	a->mapped = false;
177 	dma_unmap_sgtable(attachment->dev, table, direction, attr);
178 }
179 
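/*
 * begin/end_cpu_access: for cached buffers, sync every currently mapped
 * attachment so the CPU (and afterwards the devices) see coherent data;
 * uncached buffers skip the sync entirely.
 */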
180 static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
181 						enum dma_data_direction direction)
182 {
183 	struct system_heap_buffer *buffer = dmabuf->priv;
184 	struct dma_heap_attachment *a;
185 
186 	mutex_lock(&buffer->lock);
187 
188 	if (buffer->vmap_cnt)
189 		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
190 
191 	if (!buffer->uncached) {
192 		list_for_each_entry(a, &buffer->attachments, list) {
193 			if (!a->mapped)
194 				continue;
195 			dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
196 		}
197 	}
198 	mutex_unlock(&buffer->lock);
199 
200 	return 0;
201 }
202 
203 static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
204 					      enum dma_data_direction direction)
205 {
206 	struct system_heap_buffer *buffer = dmabuf->priv;
207 	struct dma_heap_attachment *a;
208 
209 	mutex_lock(&buffer->lock);
210 
211 	if (buffer->vmap_cnt)
212 		flush_kernel_vmap_range(buffer->vaddr, buffer->len);
213 
214 	if (!buffer->uncached) {
215 		list_for_each_entry(a, &buffer->attachments, list) {
216 			if (!a->mapped)
217 				continue;
218 			dma_sync_sgtable_for_device(a->dev, a->table, direction);
219 		}
220 	}
221 	mutex_unlock(&buffer->lock);
222 
223 	return 0;
224 }
225 
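/*
 * Sync only the [offset, offset + length) window of the buffer by walking
 * the scatter list and syncing the part of each segment that overlaps the
 * requested range. Note that sg_phys() is used as the DMA address here,
 * which relies on the heap device being direct-mapped.
 */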
226 static int system_heap_sgl_sync_range(struct device *dev,
227 				      struct sg_table *sgt,
228 				      unsigned int offset,
229 				      unsigned int length,
230 				      enum dma_data_direction dir,
231 				      bool for_cpu)
232 {
233 	struct scatterlist *sg;
234 	unsigned int len = 0;
235 	dma_addr_t sg_dma_addr;
236 	int i;
237 
238 	for_each_sgtable_sg(sgt, sg, i) {
239 		unsigned int sg_offset, sg_left, size = 0;
240 
241 		sg_dma_addr = sg_phys(sg);
242 
243 		len += sg->length;
244 		if (len <= offset)
245 			continue;
246 
247 		sg_left = len - offset;
248 		sg_offset = sg->length - sg_left;
249 
250 		size = (length < sg_left) ? length : sg_left;
251 		if (for_cpu)
252 			dma_sync_single_range_for_cpu(dev, sg_dma_addr,
253 						      sg_offset, size, dir);
254 		else
255 			dma_sync_single_range_for_device(dev, sg_dma_addr,
256 							 sg_offset, size, dir);
257 
258 		offset += size;
259 		length -= size;
260 
261 		if (length == 0)
262 			break;
263 	}
264 
265 	return 0;
266 }
267 
268 static int __maybe_unused
269 system_heap_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
270 					     enum dma_data_direction direction,
271 					     unsigned int offset,
272 					     unsigned int len)
273 {
274 	struct system_heap_buffer *buffer = dmabuf->priv;
275 	struct dma_heap *heap = buffer->heap;
276 	struct sg_table *table = &buffer->sg_table;
277 	int ret;
278 
279 	if (direction == DMA_TO_DEVICE)
280 		return 0;
281 
282 	mutex_lock(&buffer->lock);
283 	if (buffer->vmap_cnt)
284 		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
285 
286 	if (buffer->uncached) {
287 		mutex_unlock(&buffer->lock);
288 		return 0;
289 	}
290 
291 	ret = system_heap_sgl_sync_range(dma_heap_get_dev(heap), table,
292 					 offset, len, direction, true);
293 	mutex_unlock(&buffer->lock);
294 
295 	return ret;
296 }
297 
298 static int __maybe_unused
299 system_heap_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
300 					   enum dma_data_direction direction,
301 					   unsigned int offset,
302 					   unsigned int len)
303 {
304 	struct system_heap_buffer *buffer = dmabuf->priv;
305 	struct dma_heap *heap = buffer->heap;
306 	struct sg_table *table = &buffer->sg_table;
307 	int ret;
308 
309 	mutex_lock(&buffer->lock);
310 	if (buffer->vmap_cnt)
311 		flush_kernel_vmap_range(buffer->vaddr, buffer->len);
312 
313 	if (buffer->uncached) {
314 		mutex_unlock(&buffer->lock);
315 		return 0;
316 	}
317 
318 	ret = system_heap_sgl_sync_range(dma_heap_get_dev(heap), table,
319 					 offset, len, direction, false);
320 	mutex_unlock(&buffer->lock);
321 
322 	return ret;
323 }
324 
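/*
 * Map the buffer into userspace one page at a time, honouring vm_pgoff.
 * Uncached buffers are mapped write-combined.
 */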
325 static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
326 {
327 	struct system_heap_buffer *buffer = dmabuf->priv;
328 	struct sg_table *table = &buffer->sg_table;
329 	unsigned long addr = vma->vm_start;
330 	struct sg_page_iter piter;
331 	int ret;
332 
333 	if (buffer->uncached)
334 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
335 
336 	for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
337 		struct page *page = sg_page_iter_page(&piter);
338 
339 		ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
340 				      vma->vm_page_prot);
341 		if (ret)
342 			return ret;
343 		addr += PAGE_SIZE;
344 		if (addr >= vma->vm_end)
345 			return 0;
346 	}
347 	return 0;
348 }
349 
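/*
 * Build a kernel mapping of the whole buffer by collecting its pages from
 * the sg_table and vmap()ing them (write-combined when uncached).
 */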
350 static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
351 {
352 	struct sg_table *table = &buffer->sg_table;
353 	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
354 	struct page **pages = vmalloc(sizeof(struct page *) * npages);
355 	struct page **tmp = pages;
356 	struct sg_page_iter piter;
357 	pgprot_t pgprot = PAGE_KERNEL;
358 	void *vaddr;
359 
360 	if (!pages)
361 		return ERR_PTR(-ENOMEM);
362 
363 	if (buffer->uncached)
364 		pgprot = pgprot_writecombine(PAGE_KERNEL);
365 
366 	for_each_sgtable_page(table, &piter, 0) {
367 		WARN_ON(tmp - pages >= npages);
368 		*tmp++ = sg_page_iter_page(&piter);
369 	}
370 
371 	vaddr = vmap(pages, npages, VM_MAP, pgprot);
372 	vfree(pages);
373 
374 	if (!vaddr)
375 		return ERR_PTR(-ENOMEM);
376 
377 	return vaddr;
378 }
379 
380 static void *system_heap_vmap(struct dma_buf *dmabuf)
381 {
382 	struct system_heap_buffer *buffer = dmabuf->priv;
383 	void *vaddr;
384 
385 	mutex_lock(&buffer->lock);
386 	if (buffer->vmap_cnt) {
387 		buffer->vmap_cnt++;
388 		vaddr = buffer->vaddr;
389 		goto out;
390 	}
391 
392 	vaddr = system_heap_do_vmap(buffer);
393 	if (IS_ERR(vaddr))
394 		goto out;
395 
396 	buffer->vaddr = vaddr;
397 	buffer->vmap_cnt++;
398 out:
399 	mutex_unlock(&buffer->lock);
400 
401 	return vaddr;
402 }
403 
404 static void system_heap_vunmap(struct dma_buf *dmabuf, void *vaddr)
405 {
406 	struct system_heap_buffer *buffer = dmabuf->priv;
407 
408 	mutex_lock(&buffer->lock);
409 	if (!--buffer->vmap_cnt) {
410 		vunmap(buffer->vaddr);
411 		buffer->vaddr = NULL;
412 	}
413 	mutex_unlock(&buffer->lock);
414 }
415 
416 static int system_heap_zero_buffer(struct system_heap_buffer *buffer)
417 {
418 	struct sg_table *sgt = &buffer->sg_table;
419 	struct sg_page_iter piter;
420 	struct page *p;
421 	void *vaddr;
422 	int ret = 0;
423 
424 	for_each_sgtable_page(sgt, &piter, 0) {
425 		p = sg_page_iter_page(&piter);
426 		vaddr = kmap_atomic(p);
427 		memset(vaddr, 0, PAGE_SIZE);
428 		kunmap_atomic(vaddr);
429 	}
430 
431 	return ret;
432 }
433 
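/*
 * Deferred-free callback: normally zero the pages and return them to the
 * matching per-order pool; under memory pressure (or if zeroing fails),
 * release them straight back to the page allocator.
 */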
434 static void system_heap_buf_free(struct deferred_freelist_item *item,
435 				 enum df_reason reason)
436 {
437 	struct system_heap_buffer *buffer;
438 	struct sg_table *table;
439 	struct scatterlist *sg;
440 	int i, j;
441 
442 	buffer = container_of(item, struct system_heap_buffer, deferred_free);
443 	/* Zero the buffer pages before adding back to the pool */
444 	if (reason == DF_NORMAL)
445 		if (system_heap_zero_buffer(buffer))
446 			reason = DF_UNDER_PRESSURE; // On failure, just free
447 
448 	table = &buffer->sg_table;
449 	for_each_sgtable_sg(table, sg, i) {
450 		struct page *page = sg_page(sg);
451 
452 		if (reason == DF_UNDER_PRESSURE) {
453 			__free_pages(page, compound_order(page));
454 		} else {
455 			for (j = 0; j < NUM_ORDERS; j++) {
456 				if (compound_order(page) == orders[j])
457 					break;
458 			}
459 			dmabuf_page_pool_free(buffer->pools[j], page);
460 		}
461 	}
462 	sg_free_table(table);
463 	kfree(buffer);
464 }
465 
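/*
 * Queue the buffer for deferred freeing, using its size in pages as the
 * freelist accounting hint.
 */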
466 static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
467 {
468 	struct system_heap_buffer *buffer = dmabuf->priv;
469 	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
470 
471 	deferred_free(&buffer->deferred_free, system_heap_buf_free, npages);
472 }
473 
474 static const struct dma_buf_ops system_heap_buf_ops = {
475 	.attach = system_heap_attach,
476 	.detach = system_heap_detach,
477 	.map_dma_buf = system_heap_map_dma_buf,
478 	.unmap_dma_buf = system_heap_unmap_dma_buf,
479 	.begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
480 	.end_cpu_access = system_heap_dma_buf_end_cpu_access,
481 #ifdef CONFIG_DMABUF_PARTIAL
482 	.begin_cpu_access_partial = system_heap_dma_buf_begin_cpu_access_partial,
483 	.end_cpu_access_partial = system_heap_dma_buf_end_cpu_access_partial,
484 #endif
485 	.mmap = system_heap_mmap,
486 	.vmap = system_heap_vmap,
487 	.vunmap = system_heap_vunmap,
488 	.release = system_heap_dma_buf_release,
489 };
490 
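/*
 * Grab the largest chunk that still fits: try each pool from the biggest
 * order down, skipping orders larger than the remaining size or max_order.
 */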
491 static struct page *system_heap_alloc_largest_available(struct dma_heap *heap,
492 							struct dmabuf_page_pool **pool,
493 							unsigned long size,
494 							unsigned int max_order)
495 {
496 	struct page *page;
497 	int i;
498 
499 	for (i = 0; i < NUM_ORDERS; i++) {
500 		if (size <  (PAGE_SIZE << orders[i]))
501 			continue;
502 		if (max_order < orders[i])
503 			continue;
504 		page = dmabuf_page_pool_alloc(pool[i]);
505 		if (!page)
506 			continue;
507 		return page;
508 	}
509 	return NULL;
510 }
511 
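/*
 * Allocate the buffer largest-order-first. 1MB and 64K blocks are appended
 * to the sg_table in allocation order; order-0 pages are first binned into
 * eight lists by their DRAM bank bits (bank_bit_first/bank_bit_mask) and
 * then emitted round-robin across the bins, presumably to spread 4K pages
 * evenly over the DRAM banks. Uncached buffers get one map/unmap cycle at
 * the end to flush the CPU cache lines left dirty by __GFP_ZERO.
 */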
512 static struct dma_buf *system_heap_do_allocate(struct dma_heap *heap,
513 					       unsigned long len,
514 					       unsigned long fd_flags,
515 					       unsigned long heap_flags,
516 					       bool uncached)
517 {
518 	struct system_heap_buffer *buffer;
519 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
520 	unsigned long size_remaining = len;
521 	unsigned int max_order = orders[0];
522 	struct dma_buf *dmabuf;
523 	struct sg_table *table;
524 	struct scatterlist *sg;
525 	struct list_head pages;
526 	struct page *page, *tmp_page;
527 	int i, ret = -ENOMEM;
528 	struct list_head lists[8];
529 	unsigned int block_index[8] = {0};
530 	unsigned int block_1M = 0;
531 	unsigned int block_64K = 0;
532 	unsigned int maximum;
533 	int j;
534 
535 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
536 	if (!buffer)
537 		return ERR_PTR(-ENOMEM);
538 
539 	INIT_LIST_HEAD(&buffer->attachments);
540 	mutex_init(&buffer->lock);
541 	buffer->heap = heap;
542 	buffer->len = len;
543 	buffer->uncached = uncached;
544 	buffer->pools = strstr(dma_heap_get_name(heap), "dma32") ? dma32_pools : pools;
545 
546 	INIT_LIST_HEAD(&pages);
547 	for (i = 0; i < 8; i++)
548 		INIT_LIST_HEAD(&lists[i]);
549 	i = 0;
550 	while (size_remaining > 0) {
551 		/*
552 		 * Avoid trying to allocate memory if the process
553 		 * has been killed by SIGKILL
554 		 */
555 		if (fatal_signal_pending(current))
556 			goto free_buffer;
557 
558 		page = system_heap_alloc_largest_available(heap, buffer->pools,
559 							   size_remaining,
560 							   max_order);
561 		if (!page)
562 			goto free_buffer;
563 
564 		size_remaining -= page_size(page);
565 		max_order = compound_order(page);
566 		if (max_order) {
567 			if (max_order == 8)
568 				block_1M++;
569 			if (max_order == 4)
570 				block_64K++;
571 			list_add_tail(&page->lru, &pages);
572 		} else {
573 			dma_addr_t phys = page_to_phys(page);
574 			unsigned int bit_index = ((phys >> bank_bit_first) & bank_bit_mask) & 0x7;
575 
576 			list_add_tail(&page->lru, &lists[bit_index]);
577 			block_index[bit_index]++;
578 		}
579 		i++;
580 	}
581 
582 	table = &buffer->sg_table;
583 	if (sg_alloc_table(table, i, GFP_KERNEL))
584 		goto free_buffer;
585 
586 	maximum = block_index[0];
587 	for (i = 1; i < 8; i++)
588 		maximum = max(maximum, block_index[i]);
589 	sg = table->sgl;
590 	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
591 		sg_set_page(sg, page, page_size(page), 0);
592 		sg = sg_next(sg);
593 		list_del(&page->lru);
594 	}
595 	for (i = 0; i < maximum; i++) {
596 		for (j = 0; j < 8; j++) {
597 			if (!list_empty(&lists[j])) {
598 				page = list_first_entry(&lists[j], struct page, lru);
599 				sg_set_page(sg, page, PAGE_SIZE, 0);
600 				sg = sg_next(sg);
601 				list_del(&page->lru);
602 			}
603 		}
604 	}
605 
606 	/* create the dmabuf */
607 	exp_info.exp_name = dma_heap_get_name(heap);
608 	exp_info.ops = &system_heap_buf_ops;
609 	exp_info.size = buffer->len;
610 	exp_info.flags = fd_flags;
611 	exp_info.priv = buffer;
612 	dmabuf = dma_buf_export(&exp_info);
613 	if (IS_ERR(dmabuf)) {
614 		ret = PTR_ERR(dmabuf);
615 		goto free_pages;
616 	}
617 
618 	/*
619 	 * For uncached buffers, we need to initially flush cpu cache, since
620 	 * the __GFP_ZERO on the allocation means the zeroing was done by the
621 	 * cpu and thus it is likely cached. Map (and implicitly flush) and
622 	 * unmap it now so we don't get corruption later on.
623 	 */
624 	if (buffer->uncached) {
625 		dma_map_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0);
626 		dma_unmap_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0);
627 	}
628 
629 	return dmabuf;
630 
631 free_pages:
632 	for_each_sgtable_sg(table, sg, i) {
633 		struct page *p = sg_page(sg);
634 
635 		__free_pages(p, compound_order(p));
636 	}
637 	sg_free_table(table);
638 free_buffer:
639 	list_for_each_entry_safe(page, tmp_page, &pages, lru)
640 		__free_pages(page, compound_order(page));
641 	for (i = 0; i < 8; i++) {
642 		list_for_each_entry_safe(page, tmp_page, &lists[i], lru)
643 			__free_pages(page, compound_order(page));
644 	}
645 	kfree(buffer);
646 
647 	return ERR_PTR(ret);
648 }
649 
650 static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
651 					    unsigned long len,
652 					    unsigned long fd_flags,
653 					    unsigned long heap_flags)
654 {
655 	return system_heap_do_allocate(heap, len, fd_flags, heap_flags, false);
656 }
657 
658 static long system_get_pool_size(struct dma_heap *heap)
659 {
660 	int i;
661 	long num_pages = 0;
662 	struct dmabuf_page_pool **pool;
663 
664 	pool = strstr(dma_heap_get_name(heap), "dma32") ? dma32_pools : pools;
665 	for (i = 0; i < NUM_ORDERS; i++, pool++) {
666 		num_pages += ((*pool)->count[POOL_LOWPAGE] +
667 			      (*pool)->count[POOL_HIGHPAGE]) << (*pool)->order;
668 	}
669 
670 	return num_pages << PAGE_SHIFT;
671 }
672 
673 static const struct dma_heap_ops system_heap_ops = {
674 	.allocate = system_heap_allocate,
675 	.get_pool_size = system_get_pool_size,
676 };
677 
678 static struct dma_buf *system_uncached_heap_allocate(struct dma_heap *heap,
679 						     unsigned long len,
680 						     unsigned long fd_flags,
681 						     unsigned long heap_flags)
682 {
683 	return system_heap_do_allocate(heap, len, fd_flags, heap_flags, true);
684 }
685 
686 /* Dummy function to be used until we can call coerce_mask_and_coherent */
687 static struct dma_buf *system_uncached_heap_not_initialized(struct dma_heap *heap,
688 							    unsigned long len,
689 							    unsigned long fd_flags,
690 							    unsigned long heap_flags)
691 {
692 	return ERR_PTR(-EBUSY);
693 }
694 
695 static struct dma_heap_ops system_uncached_heap_ops = {
696 	/* After system_heap_create is complete, we will swap this */
697 	.allocate = system_uncached_heap_not_initialized,
698 };
699 
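/*
 * Prepare the heap's backing device for DMA: set a 64-bit DMA mask and
 * allocate dma_parms so a maximum segment size can be configured.
 */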
700 static int set_heap_dev_dma(struct device *heap_dev)
701 {
702 	int err = 0;
703 
704 	if (!heap_dev)
705 		return -EINVAL;
706 
707 	dma_coerce_mask_and_coherent(heap_dev, DMA_BIT_MASK(64));
708 
709 	if (!heap_dev->dma_parms) {
710 		heap_dev->dma_parms = devm_kzalloc(heap_dev,
711 						   sizeof(*heap_dev->dma_parms),
712 						   GFP_KERNEL);
713 		if (!heap_dev->dma_parms)
714 			return -ENOMEM;
715 
716 		err = dma_set_max_seg_size(heap_dev, (unsigned int)DMA_BIT_MASK(64));
717 		if (err) {
718 			devm_kfree(heap_dev, heap_dev->dma_parms);
719 			dev_err(heap_dev, "Failed to set DMA segment size, err:%d\n", err);
720 			return err;
721 		}
722 	}
723 
724 	return 0;
725 }
726 
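/*
 * Module init: optionally clamp orders[] for swiotlb, create the per-order
 * page pools (normal and DMA32), register the "system", "system-dma32",
 * "system-uncached" and "system-uncached-dma32" heaps, enable uncached
 * allocations once the DMA masks are configured, and fetch the DRAM bank
 * bit layout via sip_smc_get_dram_map().
 */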
727 static int system_heap_create(void)
728 {
729 	struct dma_heap_export_info exp_info;
730 	int i, err = 0;
731 	struct dram_addrmap_info *ddr_map_info;
732 
733 	/*
734 	 * swiotlb limits the size of a single DMA mapping, so calculate the
735 	 * maximum usable allocation size locally.
736 	 *
737 	 * When swiotlb_max_segment() returns non-zero, total RAM is larger than
738 	 * 4GiB and swiotlb is not in force mode; in that case the system heap
739 	 * must limit its largest allocation order.
740 	 *
741 	 * FIXME: clamp the orders[] array as a workaround.
742 	 */
743 	if (swiotlb_max_segment()) {
744 		unsigned int max_size = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;
745 		int max_order = MAX_ORDER;
746 		int i;
747 
748 		max_size = max_t(unsigned int, max_size, PAGE_SIZE) >> PAGE_SHIFT;
749 		max_order = min(max_order, ilog2(max_size));
750 		for (i = 0; i < NUM_ORDERS; i++) {
751 			if (max_order < orders[i])
752 				orders[i] = max_order;
753 			pr_info("system_heap: orders[%d] = %u\n", i, orders[i]);
754 		}
755 	}
756 
757 	for (i = 0; i < NUM_ORDERS; i++) {
758 		pools[i] = dmabuf_page_pool_create(order_flags[i], orders[i]);
759 
760 		if (!pools[i]) {
761 			int j;
762 
763 			pr_err("%s: page pool creation failed!\n", __func__);
764 			for (j = 0; j < i; j++)
765 				dmabuf_page_pool_destroy(pools[j]);
766 			return -ENOMEM;
767 		}
768 	}
769 
770 	for (i = 0; i < NUM_ORDERS; i++) {
771 		dma32_pools[i] = dmabuf_page_pool_create(order_flags[i] | GFP_DMA32, orders[i]);
772 
773 		if (!dma32_pools[i]) {
774 			int j;
775 
776 			pr_err("%s: page dma32 pool creation failed!\n", __func__);
777 			for (j = 0; j < i; j++)
778 				dmabuf_page_pool_destroy(dma32_pools[j]);
779 			goto err_dma32_pool;
780 		}
781 	}
782 
783 	exp_info.name = "system";
784 	exp_info.ops = &system_heap_ops;
785 	exp_info.priv = NULL;
786 
787 	sys_heap = dma_heap_add(&exp_info);
788 	if (IS_ERR(sys_heap))
789 		return PTR_ERR(sys_heap);
790 
791 	exp_info.name = "system-dma32";
792 	exp_info.ops = &system_heap_ops;
793 	exp_info.priv = NULL;
794 
795 	sys_dma32_heap = dma_heap_add(&exp_info);
796 	if (IS_ERR(sys_dma32_heap))
797 		return PTR_ERR(sys_dma32_heap);
798 
799 	exp_info.name = "system-uncached";
800 	exp_info.ops = &system_uncached_heap_ops;
801 	exp_info.priv = NULL;
802 
803 	sys_uncached_heap = dma_heap_add(&exp_info);
804 	if (IS_ERR(sys_uncached_heap))
805 		return PTR_ERR(sys_uncached_heap);
806 
807 	err = set_heap_dev_dma(dma_heap_get_dev(sys_uncached_heap));
808 	if (err)
809 		return err;
810 
811 	exp_info.name = "system-uncached-dma32";
812 	exp_info.ops = &system_uncached_heap_ops;
813 	exp_info.priv = NULL;
814 
815 	sys_uncached_dma32_heap = dma_heap_add(&exp_info);
816 	if (IS_ERR(sys_uncached_dma32_heap))
817 		return PTR_ERR(sys_uncached_dma32_heap);
818 
819 	err = set_heap_dev_dma(dma_heap_get_dev(sys_uncached_dma32_heap));
820 	if (err)
821 		return err;
822 	dma_coerce_mask_and_coherent(dma_heap_get_dev(sys_uncached_dma32_heap), DMA_BIT_MASK(32));
823 
824 	mb(); /* make sure we only set allocate after dma_mask is set */
825 	system_uncached_heap_ops.allocate = system_uncached_heap_allocate;
826 
827 	ddr_map_info = sip_smc_get_dram_map();
828 	if (ddr_map_info) {
829 		bank_bit_first = ddr_map_info->bank_bit_first;
830 		bank_bit_mask = ddr_map_info->bank_bit_mask;
831 	}
832 
833 	return 0;
834 err_dma32_pool:
835 	for (i = 0; i < NUM_ORDERS; i++)
836 		dmabuf_page_pool_destroy(pools[i]);
837 
838 	return -ENOMEM;
839 }
840 module_init(system_heap_create);
841 MODULE_LICENSE("GPL v2");
842