// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common CPM code
 *
 * Author: Scott Wood <scottwood@freescale.com>
 *
 * Copyright 2007-2008,2010 Freescale Semiconductor, Inc.
 *
 * Some parts derived from commproc.c/cpm2_common.c, which is:
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 * Copyright (c) 1999-2001 Dan Malek <dan@embeddedalley.com>
 * Copyright (c) 2000 MontaVista Software, Inc (source@mvista.com)
 * 2006 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 */
#include <linux/genalloc.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/of_device.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <soc/fsl/qe/qe.h>

static struct gen_pool *muram_pool;
static spinlock_t cpm_muram_lock;
static u8 __iomem *muram_vbase;
static phys_addr_t muram_pbase;

struct muram_block {
	struct list_head head;
	s32 start;
	int size;
};

static LIST_HEAD(muram_block_list);

/* max address size we deal with */
#define OF_MAX_ADDR_CELLS 4
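
/*
 * gen_pool_alloc() returns 0 to signal failure, so bias every pool
 * address by a nonzero offset; otherwise a valid allocation starting
 * at muram offset 0 would be indistinguishable from a failed one.
 */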
#define GENPOOL_OFFSET (4096 * 8)

/**
 * cpm_muram_init - initialize the CPM/QE multi-user ram allocator
 *
 * Finds the muram node in the device tree, creates a genalloc pool
 * covering each muram region, and ioremaps the muram. Returns 0 on
 * success or a negative errno on failure; if the muram has already
 * been initialized, returns 0 without reinitializing.
 */
int cpm_muram_init(void)
{
	struct device_node *np;
	struct resource r;
	__be32 zero[OF_MAX_ADDR_CELLS] = {};
	resource_size_t max = 0;
	int i = 0;
	int ret = 0;

	if (muram_pbase)
		return 0;

	spin_lock_init(&cpm_muram_lock);
	np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data");
	if (!np) {
		/* try legacy bindings */
		np = of_find_node_by_name(NULL, "data-only");
		if (!np) {
			pr_err("Cannot find CPM muram data node\n");
			ret = -ENODEV;
			goto out_muram;
		}
	}

	muram_pool = gen_pool_create(0, -1);
	if (!muram_pool) {
		pr_err("Cannot allocate memory pool for CPM/QE muram\n");
		ret = -ENOMEM;
		goto out_muram;
	}
	muram_pbase = of_translate_address(np, zero);
	if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
		pr_err("Cannot translate zero through CPM muram node\n");
		ret = -ENODEV;
		goto out_pool;
	}

	while (of_address_to_resource(np, i++, &r) == 0) {
		if (r.end > max)
			max = r.end;
		ret = gen_pool_add(muram_pool, r.start - muram_pbase +
				   GENPOOL_OFFSET, resource_size(&r), -1);
		if (ret) {
			pr_err("QE: couldn't add muram to pool!\n");
			goto out_pool;
		}
	}

	muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1);
	if (!muram_vbase) {
		pr_err("Cannot map QE muram\n");
		ret = -ENOMEM;
		goto out_pool;
	}
	goto out_muram;
out_pool:
	gen_pool_destroy(muram_pool);
out_muram:
	of_node_put(np);
	return ret;
}
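
/*
 * Note: platform setup code is expected to call cpm_muram_init() once,
 * before the first allocation; later calls are no-ops because
 * muram_pbase is already set by then.
 */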

/*
 * cpm_muram_alloc_common - cpm_muram_alloc common code
 * @size: number of bytes to allocate
 * @algo: genalloc allocation algorithm to use
 * @data: data for the genalloc algorithm
 *
 * Must be called with cpm_muram_lock held.
 *
 * This function returns a non-negative offset into the muram area, or
 * a negative errno on failure.
 */
static s32 cpm_muram_alloc_common(unsigned long size,
				  genpool_algo_t algo, void *data)
{
	struct muram_block *entry;
	s32 start;

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;
	start = gen_pool_alloc_algo(muram_pool, size, algo, data);
	if (!start) {
		kfree(entry);
		return -ENOMEM;
	}
	/* convert the biased pool address back into a muram offset */
	start = start - GENPOOL_OFFSET;
	memset_io(cpm_muram_addr(start), 0, size);
	entry->start = start;
	entry->size = size;
	list_add(&entry->head, &muram_block_list);

	return start;
}

/**
 * cpm_muram_alloc - allocate the requested size worth of multi-user ram
 * @size: number of bytes to allocate
 * @align: requested alignment, in bytes
 *
 * This function returns a non-negative offset into the muram area, or
 * a negative errno on failure.
 * Use cpm_muram_addr() to get the virtual address of the area.
 * Use cpm_muram_free() to free the allocation.
 */
s32 cpm_muram_alloc(unsigned long size, unsigned long align)
{
	s32 start;
	unsigned long flags;
	struct genpool_data_align muram_pool_data;

	spin_lock_irqsave(&cpm_muram_lock, flags);
	muram_pool_data.align = align;
	start = cpm_muram_alloc_common(size, gen_pool_first_fit_align,
				       &muram_pool_data);
	spin_unlock_irqrestore(&cpm_muram_lock, flags);
	return start;
}
EXPORT_SYMBOL(cpm_muram_alloc);
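
/*
 * Illustrative sketch of a typical allocate/use/free cycle in a
 * hypothetical caller; the size, alignment and value below are made
 * up for the example:
 *
 *	s32 off = cpm_muram_alloc(64, 8);
 *
 *	if (off < 0)
 *		return off;
 *	iowrite16be(0x1234, cpm_muram_addr(off));
 *	...
 *	cpm_muram_free(off);
 */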

/**
 * cpm_muram_free - free a chunk of multi-user ram
 * @offset: The beginning of the chunk as returned by cpm_muram_alloc().
 */
void cpm_muram_free(s32 offset)
{
	unsigned long flags;
	int size;
	struct muram_block *tmp;

	if (offset < 0)
		return;

	size = 0;
	spin_lock_irqsave(&cpm_muram_lock, flags);
	/* find the size of the allocation that starts at this offset */
	list_for_each_entry(tmp, &muram_block_list, head) {
		if (tmp->start == offset) {
			size = tmp->size;
			list_del(&tmp->head);
			kfree(tmp);
			break;
		}
	}
	gen_pool_free(muram_pool, offset + GENPOOL_OFFSET, size);
	spin_unlock_irqrestore(&cpm_muram_lock, flags);
}
EXPORT_SYMBOL(cpm_muram_free);

/**
 * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
 * @offset: muram offset at which the allocation must start
 * @size: number of bytes to allocate
 *
 * This function returns @offset if the area was available, a negative
 * errno otherwise.
 * Use cpm_muram_addr() to get the virtual address of the area.
 * Use cpm_muram_free() to free the allocation.
 */
s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
{
	s32 start;
	unsigned long flags;
	struct genpool_data_fixed muram_pool_data_fixed;

	spin_lock_irqsave(&cpm_muram_lock, flags);
	muram_pool_data_fixed.offset = offset + GENPOOL_OFFSET;
	start = cpm_muram_alloc_common(size, gen_pool_fixed_alloc,
				       &muram_pool_data_fixed);
	spin_unlock_irqrestore(&cpm_muram_lock, flags);
	return start;
}
EXPORT_SYMBOL(cpm_muram_alloc_fixed);
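
/*
 * Illustrative sketch (the offset and size are hypothetical): reserving
 * a region whose location is dictated by hardware, e.g. a parameter RAM
 * area at a fixed muram offset:
 *
 *	s32 off = cpm_muram_alloc_fixed(0x100, 32);
 *
 *	if (off < 0)
 *		return off;
 *	void __iomem *pram = cpm_muram_addr(off);
 */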

/**
 * cpm_muram_addr - turn a muram offset into a virtual address
 * @offset: muram offset to convert
 */
void __iomem *cpm_muram_addr(unsigned long offset)
{
	return muram_vbase + offset;
}
EXPORT_SYMBOL(cpm_muram_addr);

/**
 * cpm_muram_offset - turn a muram virtual address back into a muram offset
 * @addr: virtual address obtained from cpm_muram_addr()
 */
unsigned long cpm_muram_offset(void __iomem *addr)
{
	return addr - (void __iomem *)muram_vbase;
}
EXPORT_SYMBOL(cpm_muram_offset);

/**
 * cpm_muram_dma - turn a muram virtual address into a DMA address
 * @addr: virtual address from cpm_muram_addr() to convert
 */
dma_addr_t cpm_muram_dma(void __iomem *addr)
{
	return muram_pbase + ((u8 __iomem *)addr - muram_vbase);
}
EXPORT_SYMBOL(cpm_muram_dma);
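
/*
 * Illustrative sketch: hardware-visible structures such as buffer
 * descriptors must be given the DMA address of a muram area, not its
 * kernel virtual address. "off" is assumed to come from a prior
 * cpm_muram_alloc() call:
 *
 *	void __iomem *bd = cpm_muram_addr(off);
 *	dma_addr_t bd_dma = cpm_muram_dma(bd);
 *
 * Hand bd_dma to the controller; access the fields through bd.
 */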