// SPDX-License-Identifier: GPL-2.0
/*
 * Early setup for Rockchip DMA CMA
 *
 * Copyright (C) 2022 Rockchip Electronics Co. Ltd.
 * Author: Simon Xue <xxm@rock-chips.com>
 */

#include <linux/cma.h>
#include <linux/dma-map-ops.h>

#include "rk-dma-heap.h"

#define RK_DMA_HEAP_CMA_DEFAULT_SIZE SZ_32M

static unsigned long rk_dma_heap_size __initdata;
static unsigned long rk_dma_heap_base __initdata;

static struct cma *rk_dma_heap_cma;
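
/*
 * Parse the "rk_dma_heap_cma=<size>[@<base>]" early kernel parameter,
 * e.g. "rk_dma_heap_cma=64M@0x10000000" (illustrative values). The size
 * is required; the optional base address requests a fixed reservation at
 * that physical address in rk_dma_heap_cma_setup() below.
 */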
static int __init early_dma_heap_cma(char *p)
{
	if (!p) {
		pr_err("Config string not provided\n");
		return -EINVAL;
	}

	rk_dma_heap_size = memparse(p, &p);
	if (*p != '@')
		return 0;

	rk_dma_heap_base = memparse(p + 1, &p);

	return 0;
}
early_param("rk_dma_heap_cma", early_dma_heap_cma);
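
/*
 * Without CONFIG_DMA_CMA the kernel does not provide
 * dma_contiguous_early_fixup(), so define an empty __weak stub here to
 * keep the call in rk_dma_heap_cma_setup() linkable; any real
 * implementation elsewhere takes precedence over the weak symbol.
 */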
#ifndef CONFIG_DMA_CMA
void __weak
dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
}
#endif
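
/*
 * Reserve the CMA area backing the Rockchip DMA heap. Must run early in
 * boot, while memblock is still available, since cma_declare_contiguous()
 * reserves the memory through memblock. Uses the command-line size/base
 * when given, otherwise RK_DMA_HEAP_CMA_DEFAULT_SIZE at a kernel-chosen
 * base.
 */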
int __init rk_dma_heap_cma_setup(void)
{
	unsigned long size;
	int ret;
	bool fix = false;

	if (rk_dma_heap_size)
		size = rk_dma_heap_size;
	else
		size = RK_DMA_HEAP_CMA_DEFAULT_SIZE;

	if (rk_dma_heap_base)
		fix = true;
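
	/*
	 * Declare the region to the core CMA allocator: page-aligned size,
	 * no upper address limit, PAGE_SIZE alignment, and fixed placement
	 * only when a base address was supplied on the command line.
	 */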
	ret = cma_declare_contiguous(rk_dma_heap_base, PAGE_ALIGN(size), 0x0,
				     PAGE_SIZE, 0, fix, "rk-dma-heap-cma",
				     &rk_dma_heap_cma);
	if (ret)
		return ret;

#if !IS_ENABLED(CONFIG_CMA_INACTIVE)
	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(cma_get_base(rk_dma_heap_cma),
				   cma_get_size(rk_dma_heap_cma));
#endif

	return 0;
}
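
/*
 * Return the CMA area reserved above so the heap implementation can
 * allocate from it. Illustrative consumer-side sketch using the core CMA
 * API (not code from this file; nr_pages is a placeholder):
 *
 *	struct cma *cma = rk_dma_heap_get_cma();
 *	struct page *pages = cma_alloc(cma, nr_pages, 0, false);
 *	if (pages)
 *		cma_release(cma, pages, nr_pages);
 */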
struct cma *rk_dma_heap_get_cma(void)
{
	return rk_dma_heap_cma;
}