// SPDX-License-Identifier: GPL-2.0+
/*
 * Device tree based initialization code for reserved memory.
 *
 * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
 * Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
 * Author: Josh Cartwright <joshc@codeaurora.org>
 */

#define pr_fmt(fmt)	"OF: reserved mem: " fmt

#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/memblock.h>

#define MAX_RESERVED_REGIONS	64
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;

static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	phys_addr_t base;

	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
	align = !align ? SMP_CACHE_BYTES : align;
	base = memblock_find_in_range(start, end, size, align);
	if (!base)
		return -ENOMEM;

	*res_base = base;
	if (nomap)
		/* "no-map": drop the region from memblock entirely */
		return memblock_remove(base, size);

	return memblock_reserve(base, size);
}

/**
 * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
 * @node:	reference to the node in the flat device tree
 * @uname:	node name
 * @base:	base address of the region (0 for dynamically placed regions)
 * @size:	size of the region (0 when it is allocated later)
 */
void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
				       phys_addr_t base, phys_addr_t size)
{
	struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];

	if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) {
		pr_err("not enough space for all defined regions.\n");
		return;
	}

	rmem->fdt_node = node;
	rmem->name = uname;
	rmem->base = base;
	rmem->size = size;

	reserved_mem_count++;
}

/**
 * __reserved_mem_alloc_size() - allocate reserved memory described by
 *	'size', 'alignment' and 'alloc-ranges' properties.
 * @node:	reference to the node in the flat device tree
 * @uname:	node name
 * @res_base:	output, base address of the allocated region
 * @res_size:	output, size of the allocated region
 */
static int __init __reserved_mem_alloc_size(unsigned long node,
	const char *uname, phys_addr_t *res_base, phys_addr_t *res_size)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t start = 0, end = 0;
	phys_addr_t base = 0, align = 0, size;
	int len;
	const __be32 *prop;
	bool nomap;
	int ret;

	prop = of_get_flat_dt_prop(node, "size", &len);
	if (!prop)
		return -EINVAL;

	if (len != dt_root_size_cells * sizeof(__be32)) {
		pr_err("invalid size property in '%s' node.\n", uname);
		return -EINVAL;
	}
	size = dt_mem_next_cell(dt_root_size_cells, &prop);

	prop = of_get_flat_dt_prop(node, "alignment", &len);
	if (prop) {
		if (len != dt_root_addr_cells * sizeof(__be32)) {
			pr_err("invalid alignment property in '%s' node.\n",
				uname);
			return -EINVAL;
		}
		align = dt_mem_next_cell(dt_root_addr_cells, &prop);
	}

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	/* Need to adjust the alignment to satisfy the CMA requirement */
	if (IS_ENABLED(CONFIG_CMA)
	    && of_flat_dt_is_compatible(node, "shared-dma-pool")
	    && of_get_flat_dt_prop(node, "reusable", NULL)
	    && !nomap) {
		unsigned long order =
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

		align = max(align, (phys_addr_t)PAGE_SIZE << order);
	}

	prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
	if (prop) {

		if (len % t_len != 0) {
			pr_err("invalid alloc-ranges property in '%s', skipping node.\n",
			       uname);
			return -EINVAL;
		}

		base = 0;

		while (len > 0) {
			start = dt_mem_next_cell(dt_root_addr_cells, &prop);
			end = start + dt_mem_next_cell(dt_root_size_cells,
						       &prop);

			ret = early_init_dt_alloc_reserved_memory_arch(size,
					align, start, end, nomap, &base);
			if (ret == 0) {
				pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
					uname, &base,
					(unsigned long)(size / SZ_1M));
				break;
			}
			len -= t_len;
		}

	} else {
		ret = early_init_dt_alloc_reserved_memory_arch(size, align,
							0, 0, nomap, &base);
		if (ret == 0)
			pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
				uname, &base, (unsigned long)(size / SZ_1M));
	}

	if (base == 0) {
		pr_info("failed to allocate memory for node '%s'\n", uname);
		return -ENOMEM;
	}

	*res_base = base;
	*res_size = size;

	return 0;
}
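
/*
 * For illustration, a dynamically placed reserved-memory child node that
 * exercises the properties parsed above might look like this (all names
 * and numbers are made up):
 *
 *	vendor_pool: vendor-pool {
 *		compatible = "shared-dma-pool";
 *		reusable;
 *		size = <0x4000000>;
 *		alignment = <0x400000>;
 *		alloc-ranges = <0x40000000 0x10000000>;
 *	};
 *
 * With no "reg" property the region is placed dynamically: "size" is
 * mandatory, while "alignment" and "alloc-ranges" are optional constraints.
 */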

static const struct of_device_id __rmem_of_table_sentinel
	__used __section("__reservedmem_of_table_end");

/**
 * __reserved_mem_init_node() - call region specific reserved memory init code
 * @rmem:	reserved_mem struct of the region to be initialized
 */
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
	extern const struct of_device_id __reservedmem_of_table[];
	const struct of_device_id *i;
	int ret = -ENOENT;

	for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
		reservedmem_of_init_fn initfn = i->data;
		const char *compat = i->compatible;

		if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
			continue;

		ret = initfn(rmem);
		if (ret == 0) {
			pr_info("initialized node %s, compatible id %s\n",
				rmem->name, compat);
			break;
		}
	}
	return ret;
}
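
/*
 * Entries in __reservedmem_of_table are registered with
 * RESERVEDMEM_OF_DECLARE(). A minimal sketch, assuming a hypothetical
 * "vendor,my-pool" compatible and setup function:
 *
 *	static int __init rmem_my_pool_setup(struct reserved_mem *rmem)
 *	{
 *		pr_info("my-pool at %pa, size %pa\n", &rmem->base, &rmem->size);
 *		return 0;
 *	}
 *	RESERVEDMEM_OF_DECLARE(my_pool, "vendor,my-pool", rmem_my_pool_setup);
 */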

static int __init __rmem_cmp(const void *a, const void *b)
{
	const struct reserved_mem *ra = a, *rb = b;

	if (ra->base < rb->base)
		return -1;

	if (ra->base > rb->base)
		return 1;

	/*
	 * Put the dynamic allocations (address == 0, size == 0) before static
	 * allocations at address 0x0 so that overlap detection works
	 * correctly.
	 */
	if (ra->size < rb->size)
		return -1;
	if (ra->size > rb->size)
		return 1;

	return 0;
}

static void __init __rmem_check_for_overlap(void)
{
	int i;

	if (reserved_mem_count < 2)
		return;

	sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
	     __rmem_cmp, NULL);
	for (i = 0; i < reserved_mem_count - 1; i++) {
		struct reserved_mem *this, *next;

		this = &reserved_mem[i];
		next = &reserved_mem[i + 1];

		if (this->base + this->size > next->base) {
			phys_addr_t this_end, next_end;

			this_end = this->base + this->size;
			next_end = next->base + next->size;
			pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
			       this->name, &this->base, &this_end,
			       next->name, &next->base, &next_end);
		}
	}
}

/**
 * fdt_init_reserved_mem() - allocate and init all saved reserved memory regions
 */
void __init fdt_init_reserved_mem(void)
{
	int i;

	/* check for overlapping reserved regions */
	__rmem_check_for_overlap();

	for (i = 0; i < reserved_mem_count; i++) {
		struct reserved_mem *rmem = &reserved_mem[i];
		unsigned long node = rmem->fdt_node;
		int len;
		const __be32 *prop;
		int err = 0;
		bool nomap;

		nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
		prop = of_get_flat_dt_prop(node, "phandle", &len);
		if (!prop)
			prop = of_get_flat_dt_prop(node, "linux,phandle", &len);
		if (prop)
			rmem->phandle = of_read_number(prop, len/4);

		if (rmem->size == 0)
			err = __reserved_mem_alloc_size(node, rmem->name,
						&rmem->base, &rmem->size);
		if (err == 0) {
			err = __reserved_mem_init_node(rmem);
			if (err != 0 && err != -ENOENT) {
				pr_info("node %s compatible matching failed\n",
					rmem->name);
				memblock_free(rmem->base, rmem->size);
				if (nomap)
					memblock_add(rmem->base, rmem->size);
			}
		}
	}
}
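
/*
 * For reference, the shape of the /reserved-memory node this second pass
 * consumes (illustrative values; a region is either placed statically
 * with "reg" or dynamically with "size"):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		framebuffer@78000000 {
 *			reg = <0x78000000 0x800000>;
 *			no-map;
 *		};
 *
 *		linux,cma {
 *			compatible = "shared-dma-pool";
 *			reusable;
 *			size = <0x4000000>;
 *		};
 *	};
 */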

static inline struct reserved_mem *__find_rmem(struct device_node *node)
{
	unsigned int i;

	if (!node->phandle)
		return NULL;

	for (i = 0; i < reserved_mem_count; i++)
		if (reserved_mem[i].phandle == node->phandle)
			return &reserved_mem[i];
	return NULL;
}

struct rmem_assigned_device {
	struct device *dev;
	struct reserved_mem *rmem;
	struct list_head list;
};

static LIST_HEAD(of_rmem_assigned_device_list);
static DEFINE_MUTEX(of_rmem_assigned_device_mutex);

/**
 * of_reserved_mem_device_init_by_idx() - assign reserved memory region to
 *					  given device
 * @dev:	Pointer to the device to configure
 * @np:		Pointer to the device_node with 'reserved-memory' property
 * @idx:	Index of selected region
 *
 * This function assigns respective DMA-mapping operations based on the
 * reserved memory region specified by the 'memory-region' property in the
 * @np node to the @dev device. When a driver needs to use more than one
 * reserved memory region, it should allocate child devices and initialize
 * a region for each of them.
 *
 * Returns error code or zero on success.
 */
int of_reserved_mem_device_init_by_idx(struct device *dev,
				       struct device_node *np, int idx)
{
	struct rmem_assigned_device *rd;
	struct device_node *target;
	struct reserved_mem *rmem;
	int ret;

	if (!np || !dev)
		return -EINVAL;

	target = of_parse_phandle(np, "memory-region", idx);
	if (!target)
		return -ENODEV;

	if (!of_device_is_available(target)) {
		of_node_put(target);
		return 0;
	}

	rmem = __find_rmem(target);
	of_node_put(target);

	if (!rmem || !rmem->ops || !rmem->ops->device_init)
		return -EINVAL;

	rd = kmalloc(sizeof(struct rmem_assigned_device), GFP_KERNEL);
	if (!rd)
		return -ENOMEM;

	ret = rmem->ops->device_init(rmem, dev);
	if (ret == 0) {
		rd->dev = dev;
		rd->rmem = rmem;

		mutex_lock(&of_rmem_assigned_device_mutex);
		list_add(&rd->list, &of_rmem_assigned_device_list);
		mutex_unlock(&of_rmem_assigned_device_mutex);

		dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
	} else {
		kfree(rd);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);
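
/*
 * Typical driver-side usage, as a sketch (error handling trimmed; the
 * probe context and region index are the caller's own):
 *
 *	ret = of_reserved_mem_device_init_by_idx(&pdev->dev,
 *						 pdev->dev.of_node, 0);
 *	if (ret && ret != -ENODEV)
 *		return ret;
 *	...
 *	of_reserved_mem_device_release(&pdev->dev);
 */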

/**
 * of_reserved_mem_device_init_by_name() - assign named reserved memory region
 *					   to given device
 * @dev:	pointer to the device to configure
 * @np:		pointer to the device node with 'memory-region' property
 * @name:	name of the selected memory region
 *
 * Returns: 0 on success or a negative error-code on failure.
 */
int of_reserved_mem_device_init_by_name(struct device *dev,
					struct device_node *np,
					const char *name)
{
	int idx = of_property_match_string(np, "memory-region-names", name);

	return of_reserved_mem_device_init_by_idx(dev, np, idx);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_name);
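
/*
 * The by-name lookup pairs "memory-region" with "memory-region-names",
 * e.g. (node names illustrative):
 *
 *	some-device {
 *		memory-region = <&fb_pool>, <&dma_pool>;
 *		memory-region-names = "framebuffer", "dma";
 *	};
 *
 * so of_reserved_mem_device_init_by_name(dev, np, "dma") resolves to
 * index 1.
 */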

/**
 * of_reserved_mem_device_release() - release reserved memory device structures
 * @dev:	Pointer to the device to deconfigure
 *
 * This function releases structures allocated for memory region handling for
 * the given device.
 */
void of_reserved_mem_device_release(struct device *dev)
{
	struct rmem_assigned_device *rd, *tmp;
	LIST_HEAD(release_list);

	mutex_lock(&of_rmem_assigned_device_mutex);
	list_for_each_entry_safe(rd, tmp, &of_rmem_assigned_device_list, list) {
		if (rd->dev == dev)
			list_move_tail(&rd->list, &release_list);
	}
	mutex_unlock(&of_rmem_assigned_device_mutex);

	list_for_each_entry_safe(rd, tmp, &release_list, list) {
		if (rd->rmem && rd->rmem->ops && rd->rmem->ops->device_release)
			rd->rmem->ops->device_release(rd->rmem, dev);

		kfree(rd);
	}
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);

/**
 * of_reserved_mem_lookup() - acquire reserved_mem from a device node
 * @np:		node pointer of the desired reserved-memory region
 *
 * This function allows drivers to acquire a reference to the reserved_mem
 * struct based on a device node handle.
 *
 * Returns a reserved_mem reference, or NULL on error.
 */
struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
{
	const char *name;
	int i;

	if (!np->full_name)
		return NULL;

	name = kbasename(np->full_name);
	for (i = 0; i < reserved_mem_count; i++)
		if (!strcmp(reserved_mem[i].name, name))
			return &reserved_mem[i];

	return NULL;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_lookup);
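
/*
 * Lookup sketch, assuming the caller resolves the phandle first (property
 * name and index are the caller's own):
 *
 *	np = of_parse_phandle(dev->of_node, "memory-region", 0);
 *	rmem = of_reserved_mem_lookup(np);
 *	of_node_put(np);
 *	if (!rmem)
 *		return -ENODEV;
 */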