// SPDX-License-Identifier: GPL-2.0-only
/*
 * powerpc code to implement the kexec_file_load syscall
 *
 * Copyright (C) 2004  Adam Litke (agl@us.ibm.com)
 * Copyright (C) 2004  IBM Corp.
 * Copyright (C) 2004,2005  Milton D Miller II, IBM Corporation
 * Copyright (C) 2005  R Sharada (sharada@in.ibm.com)
 * Copyright (C) 2006  Mohan Kumar M (mohan@in.ibm.com)
 * Copyright (C) 2020  IBM Corporation
 *
 * Based on kexec-tools' kexec-ppc64.c, fs2dt.c.
 * Heavily modified for the kernel by
 * Hari Bathini, IBM Corporation.
 */
16*4882a593Smuzhiyun
17*4882a593Smuzhiyun #define pr_fmt(fmt) "kexec ranges: " fmt
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun #include <linux/sort.h>
20*4882a593Smuzhiyun #include <linux/kexec.h>
21*4882a593Smuzhiyun #include <linux/of_device.h>
22*4882a593Smuzhiyun #include <linux/slab.h>
23*4882a593Smuzhiyun #include <asm/sections.h>
24*4882a593Smuzhiyun #include <asm/kexec_ranges.h>
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun /**
27*4882a593Smuzhiyun * get_max_nr_ranges - Get the max no. of ranges crash_mem structure
28*4882a593Smuzhiyun * could hold, given the size allocated for it.
29*4882a593Smuzhiyun * @size: Allocation size of crash_mem structure.
30*4882a593Smuzhiyun *
31*4882a593Smuzhiyun * Returns the maximum no. of ranges.
32*4882a593Smuzhiyun */
get_max_nr_ranges(size_t size)33*4882a593Smuzhiyun static inline unsigned int get_max_nr_ranges(size_t size)
34*4882a593Smuzhiyun {
35*4882a593Smuzhiyun return ((size - sizeof(struct crash_mem)) /
36*4882a593Smuzhiyun sizeof(struct crash_mem_range));
37*4882a593Smuzhiyun }
38*4882a593Smuzhiyun
39*4882a593Smuzhiyun /**
40*4882a593Smuzhiyun * get_mem_rngs_size - Get the allocated size of mem_rngs based on
41*4882a593Smuzhiyun * max_nr_ranges and chunk size.
42*4882a593Smuzhiyun * @mem_rngs: Memory ranges.
43*4882a593Smuzhiyun *
44*4882a593Smuzhiyun * Returns the maximum size of @mem_rngs.
45*4882a593Smuzhiyun */
get_mem_rngs_size(struct crash_mem * mem_rngs)46*4882a593Smuzhiyun static inline size_t get_mem_rngs_size(struct crash_mem *mem_rngs)
47*4882a593Smuzhiyun {
48*4882a593Smuzhiyun size_t size;
49*4882a593Smuzhiyun
50*4882a593Smuzhiyun if (!mem_rngs)
51*4882a593Smuzhiyun return 0;
52*4882a593Smuzhiyun
53*4882a593Smuzhiyun size = (sizeof(struct crash_mem) +
54*4882a593Smuzhiyun (mem_rngs->max_nr_ranges * sizeof(struct crash_mem_range)));
55*4882a593Smuzhiyun
56*4882a593Smuzhiyun /*
57*4882a593Smuzhiyun * Memory is allocated in size multiple of MEM_RANGE_CHUNK_SZ.
58*4882a593Smuzhiyun * So, align to get the actual length.
59*4882a593Smuzhiyun */
60*4882a593Smuzhiyun return ALIGN(size, MEM_RANGE_CHUNK_SZ);
61*4882a593Smuzhiyun }
62*4882a593Smuzhiyun
63*4882a593Smuzhiyun /**
64*4882a593Smuzhiyun * __add_mem_range - add a memory range to memory ranges list.
65*4882a593Smuzhiyun * @mem_ranges: Range list to add the memory range to.
66*4882a593Smuzhiyun * @base: Base address of the range to add.
67*4882a593Smuzhiyun * @size: Size of the memory range to add.
68*4882a593Smuzhiyun *
69*4882a593Smuzhiyun * (Re)allocates memory, if needed.
70*4882a593Smuzhiyun *
71*4882a593Smuzhiyun * Returns 0 on success, negative errno on error.
72*4882a593Smuzhiyun */
__add_mem_range(struct crash_mem ** mem_ranges,u64 base,u64 size)73*4882a593Smuzhiyun static int __add_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size)
74*4882a593Smuzhiyun {
75*4882a593Smuzhiyun struct crash_mem *mem_rngs = *mem_ranges;
76*4882a593Smuzhiyun
77*4882a593Smuzhiyun if (!mem_rngs || (mem_rngs->nr_ranges == mem_rngs->max_nr_ranges)) {
78*4882a593Smuzhiyun mem_rngs = realloc_mem_ranges(mem_ranges);
79*4882a593Smuzhiyun if (!mem_rngs)
80*4882a593Smuzhiyun return -ENOMEM;
81*4882a593Smuzhiyun }
82*4882a593Smuzhiyun
83*4882a593Smuzhiyun mem_rngs->ranges[mem_rngs->nr_ranges].start = base;
84*4882a593Smuzhiyun mem_rngs->ranges[mem_rngs->nr_ranges].end = base + size - 1;
85*4882a593Smuzhiyun pr_debug("Added memory range [%#016llx - %#016llx] at index %d\n",
86*4882a593Smuzhiyun base, base + size - 1, mem_rngs->nr_ranges);
87*4882a593Smuzhiyun mem_rngs->nr_ranges++;
88*4882a593Smuzhiyun return 0;
89*4882a593Smuzhiyun }
90*4882a593Smuzhiyun
91*4882a593Smuzhiyun /**
92*4882a593Smuzhiyun * __merge_memory_ranges - Merges the given memory ranges list.
93*4882a593Smuzhiyun * @mem_rngs: Range list to merge.
94*4882a593Smuzhiyun *
95*4882a593Smuzhiyun * Assumes a sorted range list.
96*4882a593Smuzhiyun *
97*4882a593Smuzhiyun * Returns nothing.
98*4882a593Smuzhiyun */
__merge_memory_ranges(struct crash_mem * mem_rngs)99*4882a593Smuzhiyun static void __merge_memory_ranges(struct crash_mem *mem_rngs)
100*4882a593Smuzhiyun {
101*4882a593Smuzhiyun struct crash_mem_range *ranges;
102*4882a593Smuzhiyun int i, idx;
103*4882a593Smuzhiyun
104*4882a593Smuzhiyun if (!mem_rngs)
105*4882a593Smuzhiyun return;
106*4882a593Smuzhiyun
107*4882a593Smuzhiyun idx = 0;
108*4882a593Smuzhiyun ranges = &(mem_rngs->ranges[0]);
109*4882a593Smuzhiyun for (i = 1; i < mem_rngs->nr_ranges; i++) {
110*4882a593Smuzhiyun if (ranges[i].start <= (ranges[i-1].end + 1))
111*4882a593Smuzhiyun ranges[idx].end = ranges[i].end;
112*4882a593Smuzhiyun else {
113*4882a593Smuzhiyun idx++;
114*4882a593Smuzhiyun if (i == idx)
115*4882a593Smuzhiyun continue;
116*4882a593Smuzhiyun
117*4882a593Smuzhiyun ranges[idx] = ranges[i];
118*4882a593Smuzhiyun }
119*4882a593Smuzhiyun }
120*4882a593Smuzhiyun mem_rngs->nr_ranges = idx + 1;
121*4882a593Smuzhiyun }
122*4882a593Smuzhiyun
123*4882a593Smuzhiyun /* cmp_func_t callback to sort ranges with sort() */
rngcmp(const void * _x,const void * _y)124*4882a593Smuzhiyun static int rngcmp(const void *_x, const void *_y)
125*4882a593Smuzhiyun {
126*4882a593Smuzhiyun const struct crash_mem_range *x = _x, *y = _y;
127*4882a593Smuzhiyun
128*4882a593Smuzhiyun if (x->start > y->start)
129*4882a593Smuzhiyun return 1;
130*4882a593Smuzhiyun if (x->start < y->start)
131*4882a593Smuzhiyun return -1;
132*4882a593Smuzhiyun return 0;
133*4882a593Smuzhiyun }
134*4882a593Smuzhiyun
135*4882a593Smuzhiyun /**
136*4882a593Smuzhiyun * sort_memory_ranges - Sorts the given memory ranges list.
137*4882a593Smuzhiyun * @mem_rngs: Range list to sort.
138*4882a593Smuzhiyun * @merge: If true, merge the list after sorting.
139*4882a593Smuzhiyun *
140*4882a593Smuzhiyun * Returns nothing.
141*4882a593Smuzhiyun */
sort_memory_ranges(struct crash_mem * mem_rngs,bool merge)142*4882a593Smuzhiyun void sort_memory_ranges(struct crash_mem *mem_rngs, bool merge)
143*4882a593Smuzhiyun {
144*4882a593Smuzhiyun int i;
145*4882a593Smuzhiyun
146*4882a593Smuzhiyun if (!mem_rngs)
147*4882a593Smuzhiyun return;
148*4882a593Smuzhiyun
149*4882a593Smuzhiyun /* Sort the ranges in-place */
150*4882a593Smuzhiyun sort(&(mem_rngs->ranges[0]), mem_rngs->nr_ranges,
151*4882a593Smuzhiyun sizeof(mem_rngs->ranges[0]), rngcmp, NULL);
152*4882a593Smuzhiyun
153*4882a593Smuzhiyun if (merge)
154*4882a593Smuzhiyun __merge_memory_ranges(mem_rngs);
155*4882a593Smuzhiyun
156*4882a593Smuzhiyun /* For debugging purpose */
157*4882a593Smuzhiyun pr_debug("Memory ranges:\n");
158*4882a593Smuzhiyun for (i = 0; i < mem_rngs->nr_ranges; i++) {
159*4882a593Smuzhiyun pr_debug("\t[%03d][%#016llx - %#016llx]\n", i,
160*4882a593Smuzhiyun mem_rngs->ranges[i].start,
161*4882a593Smuzhiyun mem_rngs->ranges[i].end);
162*4882a593Smuzhiyun }
163*4882a593Smuzhiyun }
164*4882a593Smuzhiyun
165*4882a593Smuzhiyun /**
166*4882a593Smuzhiyun * realloc_mem_ranges - reallocate mem_ranges with size incremented
167*4882a593Smuzhiyun * by MEM_RANGE_CHUNK_SZ. Frees up the old memory,
168*4882a593Smuzhiyun * if memory allocation fails.
169*4882a593Smuzhiyun * @mem_ranges: Memory ranges to reallocate.
170*4882a593Smuzhiyun *
171*4882a593Smuzhiyun * Returns pointer to reallocated memory on success, NULL otherwise.
172*4882a593Smuzhiyun */
realloc_mem_ranges(struct crash_mem ** mem_ranges)173*4882a593Smuzhiyun struct crash_mem *realloc_mem_ranges(struct crash_mem **mem_ranges)
174*4882a593Smuzhiyun {
175*4882a593Smuzhiyun struct crash_mem *mem_rngs = *mem_ranges;
176*4882a593Smuzhiyun unsigned int nr_ranges;
177*4882a593Smuzhiyun size_t size;
178*4882a593Smuzhiyun
179*4882a593Smuzhiyun size = get_mem_rngs_size(mem_rngs);
180*4882a593Smuzhiyun nr_ranges = mem_rngs ? mem_rngs->nr_ranges : 0;
181*4882a593Smuzhiyun
182*4882a593Smuzhiyun size += MEM_RANGE_CHUNK_SZ;
183*4882a593Smuzhiyun mem_rngs = krealloc(*mem_ranges, size, GFP_KERNEL);
184*4882a593Smuzhiyun if (!mem_rngs) {
185*4882a593Smuzhiyun kfree(*mem_ranges);
186*4882a593Smuzhiyun *mem_ranges = NULL;
187*4882a593Smuzhiyun return NULL;
188*4882a593Smuzhiyun }
189*4882a593Smuzhiyun
190*4882a593Smuzhiyun mem_rngs->nr_ranges = nr_ranges;
191*4882a593Smuzhiyun mem_rngs->max_nr_ranges = get_max_nr_ranges(size);
192*4882a593Smuzhiyun *mem_ranges = mem_rngs;
193*4882a593Smuzhiyun
194*4882a593Smuzhiyun return mem_rngs;
195*4882a593Smuzhiyun }
196*4882a593Smuzhiyun
197*4882a593Smuzhiyun /**
198*4882a593Smuzhiyun * add_mem_range - Updates existing memory range, if there is an overlap.
199*4882a593Smuzhiyun * Else, adds a new memory range.
200*4882a593Smuzhiyun * @mem_ranges: Range list to add the memory range to.
201*4882a593Smuzhiyun * @base: Base address of the range to add.
202*4882a593Smuzhiyun * @size: Size of the memory range to add.
203*4882a593Smuzhiyun *
204*4882a593Smuzhiyun * (Re)allocates memory, if needed.
205*4882a593Smuzhiyun *
206*4882a593Smuzhiyun * Returns 0 on success, negative errno on error.
207*4882a593Smuzhiyun */
add_mem_range(struct crash_mem ** mem_ranges,u64 base,u64 size)208*4882a593Smuzhiyun int add_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size)
209*4882a593Smuzhiyun {
210*4882a593Smuzhiyun struct crash_mem *mem_rngs = *mem_ranges;
211*4882a593Smuzhiyun u64 mstart, mend, end;
212*4882a593Smuzhiyun unsigned int i;
213*4882a593Smuzhiyun
214*4882a593Smuzhiyun if (!size)
215*4882a593Smuzhiyun return 0;
216*4882a593Smuzhiyun
217*4882a593Smuzhiyun end = base + size - 1;
218*4882a593Smuzhiyun
219*4882a593Smuzhiyun if (!mem_rngs || !(mem_rngs->nr_ranges))
220*4882a593Smuzhiyun return __add_mem_range(mem_ranges, base, size);
221*4882a593Smuzhiyun
222*4882a593Smuzhiyun for (i = 0; i < mem_rngs->nr_ranges; i++) {
223*4882a593Smuzhiyun mstart = mem_rngs->ranges[i].start;
224*4882a593Smuzhiyun mend = mem_rngs->ranges[i].end;
225*4882a593Smuzhiyun if (base < mend && end > mstart) {
226*4882a593Smuzhiyun if (base < mstart)
227*4882a593Smuzhiyun mem_rngs->ranges[i].start = base;
228*4882a593Smuzhiyun if (end > mend)
229*4882a593Smuzhiyun mem_rngs->ranges[i].end = end;
230*4882a593Smuzhiyun return 0;
231*4882a593Smuzhiyun }
232*4882a593Smuzhiyun }
233*4882a593Smuzhiyun
234*4882a593Smuzhiyun return __add_mem_range(mem_ranges, base, size);
235*4882a593Smuzhiyun }
236*4882a593Smuzhiyun
237*4882a593Smuzhiyun /**
238*4882a593Smuzhiyun * add_tce_mem_ranges - Adds tce-table range to the given memory ranges list.
239*4882a593Smuzhiyun * @mem_ranges: Range list to add the memory range(s) to.
240*4882a593Smuzhiyun *
241*4882a593Smuzhiyun * Returns 0 on success, negative errno on error.
242*4882a593Smuzhiyun */
add_tce_mem_ranges(struct crash_mem ** mem_ranges)243*4882a593Smuzhiyun int add_tce_mem_ranges(struct crash_mem **mem_ranges)
244*4882a593Smuzhiyun {
245*4882a593Smuzhiyun struct device_node *dn = NULL;
246*4882a593Smuzhiyun int ret = 0;
247*4882a593Smuzhiyun
248*4882a593Smuzhiyun for_each_node_by_type(dn, "pci") {
249*4882a593Smuzhiyun u64 base;
250*4882a593Smuzhiyun u32 size;
251*4882a593Smuzhiyun
252*4882a593Smuzhiyun ret = of_property_read_u64(dn, "linux,tce-base", &base);
253*4882a593Smuzhiyun ret |= of_property_read_u32(dn, "linux,tce-size", &size);
254*4882a593Smuzhiyun if (ret) {
255*4882a593Smuzhiyun /*
256*4882a593Smuzhiyun * It is ok to have pci nodes without tce. So, ignore
257*4882a593Smuzhiyun * property does not exist error.
258*4882a593Smuzhiyun */
259*4882a593Smuzhiyun if (ret == -EINVAL) {
260*4882a593Smuzhiyun ret = 0;
261*4882a593Smuzhiyun continue;
262*4882a593Smuzhiyun }
263*4882a593Smuzhiyun break;
264*4882a593Smuzhiyun }
265*4882a593Smuzhiyun
266*4882a593Smuzhiyun ret = add_mem_range(mem_ranges, base, size);
267*4882a593Smuzhiyun if (ret)
268*4882a593Smuzhiyun break;
269*4882a593Smuzhiyun }
270*4882a593Smuzhiyun
271*4882a593Smuzhiyun of_node_put(dn);
272*4882a593Smuzhiyun return ret;
273*4882a593Smuzhiyun }
274*4882a593Smuzhiyun
275*4882a593Smuzhiyun /**
276*4882a593Smuzhiyun * add_initrd_mem_range - Adds initrd range to the given memory ranges list,
277*4882a593Smuzhiyun * if the initrd was retained.
278*4882a593Smuzhiyun * @mem_ranges: Range list to add the memory range to.
279*4882a593Smuzhiyun *
280*4882a593Smuzhiyun * Returns 0 on success, negative errno on error.
281*4882a593Smuzhiyun */
add_initrd_mem_range(struct crash_mem ** mem_ranges)282*4882a593Smuzhiyun int add_initrd_mem_range(struct crash_mem **mem_ranges)
283*4882a593Smuzhiyun {
284*4882a593Smuzhiyun u64 base, end;
285*4882a593Smuzhiyun int ret;
286*4882a593Smuzhiyun
287*4882a593Smuzhiyun /* This range means something, only if initrd was retained */
288*4882a593Smuzhiyun if (!strstr(saved_command_line, "retain_initrd"))
289*4882a593Smuzhiyun return 0;
290*4882a593Smuzhiyun
291*4882a593Smuzhiyun ret = of_property_read_u64(of_chosen, "linux,initrd-start", &base);
292*4882a593Smuzhiyun ret |= of_property_read_u64(of_chosen, "linux,initrd-end", &end);
293*4882a593Smuzhiyun if (!ret)
294*4882a593Smuzhiyun ret = add_mem_range(mem_ranges, base, end - base + 1);
295*4882a593Smuzhiyun
296*4882a593Smuzhiyun return ret;
297*4882a593Smuzhiyun }
298*4882a593Smuzhiyun
299*4882a593Smuzhiyun #ifdef CONFIG_PPC_BOOK3S_64
300*4882a593Smuzhiyun /**
301*4882a593Smuzhiyun * add_htab_mem_range - Adds htab range to the given memory ranges list,
302*4882a593Smuzhiyun * if it exists
303*4882a593Smuzhiyun * @mem_ranges: Range list to add the memory range to.
304*4882a593Smuzhiyun *
305*4882a593Smuzhiyun * Returns 0 on success, negative errno on error.
306*4882a593Smuzhiyun */
add_htab_mem_range(struct crash_mem ** mem_ranges)307*4882a593Smuzhiyun int add_htab_mem_range(struct crash_mem **mem_ranges)
308*4882a593Smuzhiyun {
309*4882a593Smuzhiyun if (!htab_address)
310*4882a593Smuzhiyun return 0;
311*4882a593Smuzhiyun
312*4882a593Smuzhiyun return add_mem_range(mem_ranges, __pa(htab_address), htab_size_bytes);
313*4882a593Smuzhiyun }
314*4882a593Smuzhiyun #endif
315*4882a593Smuzhiyun
316*4882a593Smuzhiyun /**
317*4882a593Smuzhiyun * add_kernel_mem_range - Adds kernel text region to the given
318*4882a593Smuzhiyun * memory ranges list.
319*4882a593Smuzhiyun * @mem_ranges: Range list to add the memory range to.
320*4882a593Smuzhiyun *
321*4882a593Smuzhiyun * Returns 0 on success, negative errno on error.
322*4882a593Smuzhiyun */
add_kernel_mem_range(struct crash_mem ** mem_ranges)323*4882a593Smuzhiyun int add_kernel_mem_range(struct crash_mem **mem_ranges)
324*4882a593Smuzhiyun {
325*4882a593Smuzhiyun return add_mem_range(mem_ranges, 0, __pa(_end));
326*4882a593Smuzhiyun }
327*4882a593Smuzhiyun
328*4882a593Smuzhiyun /**
329*4882a593Smuzhiyun * add_rtas_mem_range - Adds RTAS region to the given memory ranges list.
330*4882a593Smuzhiyun * @mem_ranges: Range list to add the memory range to.
331*4882a593Smuzhiyun *
332*4882a593Smuzhiyun * Returns 0 on success, negative errno on error.
333*4882a593Smuzhiyun */
add_rtas_mem_range(struct crash_mem ** mem_ranges)334*4882a593Smuzhiyun int add_rtas_mem_range(struct crash_mem **mem_ranges)
335*4882a593Smuzhiyun {
336*4882a593Smuzhiyun struct device_node *dn;
337*4882a593Smuzhiyun u32 base, size;
338*4882a593Smuzhiyun int ret = 0;
339*4882a593Smuzhiyun
340*4882a593Smuzhiyun dn = of_find_node_by_path("/rtas");
341*4882a593Smuzhiyun if (!dn)
342*4882a593Smuzhiyun return 0;
343*4882a593Smuzhiyun
344*4882a593Smuzhiyun ret = of_property_read_u32(dn, "linux,rtas-base", &base);
345*4882a593Smuzhiyun ret |= of_property_read_u32(dn, "rtas-size", &size);
346*4882a593Smuzhiyun if (!ret)
347*4882a593Smuzhiyun ret = add_mem_range(mem_ranges, base, size);
348*4882a593Smuzhiyun
349*4882a593Smuzhiyun of_node_put(dn);
350*4882a593Smuzhiyun return ret;
351*4882a593Smuzhiyun }
352*4882a593Smuzhiyun
353*4882a593Smuzhiyun /**
354*4882a593Smuzhiyun * add_opal_mem_range - Adds OPAL region to the given memory ranges list.
355*4882a593Smuzhiyun * @mem_ranges: Range list to add the memory range to.
356*4882a593Smuzhiyun *
357*4882a593Smuzhiyun * Returns 0 on success, negative errno on error.
358*4882a593Smuzhiyun */
add_opal_mem_range(struct crash_mem ** mem_ranges)359*4882a593Smuzhiyun int add_opal_mem_range(struct crash_mem **mem_ranges)
360*4882a593Smuzhiyun {
361*4882a593Smuzhiyun struct device_node *dn;
362*4882a593Smuzhiyun u64 base, size;
363*4882a593Smuzhiyun int ret;
364*4882a593Smuzhiyun
365*4882a593Smuzhiyun dn = of_find_node_by_path("/ibm,opal");
366*4882a593Smuzhiyun if (!dn)
367*4882a593Smuzhiyun return 0;
368*4882a593Smuzhiyun
369*4882a593Smuzhiyun ret = of_property_read_u64(dn, "opal-base-address", &base);
370*4882a593Smuzhiyun ret |= of_property_read_u64(dn, "opal-runtime-size", &size);
371*4882a593Smuzhiyun if (!ret)
372*4882a593Smuzhiyun ret = add_mem_range(mem_ranges, base, size);
373*4882a593Smuzhiyun
374*4882a593Smuzhiyun of_node_put(dn);
375*4882a593Smuzhiyun return ret;
376*4882a593Smuzhiyun }
377*4882a593Smuzhiyun
378*4882a593Smuzhiyun /**
379*4882a593Smuzhiyun * add_reserved_mem_ranges - Adds "/reserved-ranges" regions exported by f/w
380*4882a593Smuzhiyun * to the given memory ranges list.
381*4882a593Smuzhiyun * @mem_ranges: Range list to add the memory ranges to.
382*4882a593Smuzhiyun *
383*4882a593Smuzhiyun * Returns 0 on success, negative errno on error.
384*4882a593Smuzhiyun */
add_reserved_mem_ranges(struct crash_mem ** mem_ranges)385*4882a593Smuzhiyun int add_reserved_mem_ranges(struct crash_mem **mem_ranges)
386*4882a593Smuzhiyun {
387*4882a593Smuzhiyun int n_mem_addr_cells, n_mem_size_cells, i, len, cells, ret = 0;
388*4882a593Smuzhiyun const __be32 *prop;
389*4882a593Smuzhiyun
390*4882a593Smuzhiyun prop = of_get_property(of_root, "reserved-ranges", &len);
391*4882a593Smuzhiyun if (!prop)
392*4882a593Smuzhiyun return 0;
393*4882a593Smuzhiyun
394*4882a593Smuzhiyun n_mem_addr_cells = of_n_addr_cells(of_root);
395*4882a593Smuzhiyun n_mem_size_cells = of_n_size_cells(of_root);
396*4882a593Smuzhiyun cells = n_mem_addr_cells + n_mem_size_cells;
397*4882a593Smuzhiyun
398*4882a593Smuzhiyun /* Each reserved range is an (address,size) pair */
399*4882a593Smuzhiyun for (i = 0; i < (len / (sizeof(u32) * cells)); i++) {
400*4882a593Smuzhiyun u64 base, size;
401*4882a593Smuzhiyun
402*4882a593Smuzhiyun base = of_read_number(prop + (i * cells), n_mem_addr_cells);
403*4882a593Smuzhiyun size = of_read_number(prop + (i * cells) + n_mem_addr_cells,
404*4882a593Smuzhiyun n_mem_size_cells);
405*4882a593Smuzhiyun
406*4882a593Smuzhiyun ret = add_mem_range(mem_ranges, base, size);
407*4882a593Smuzhiyun if (ret)
408*4882a593Smuzhiyun break;
409*4882a593Smuzhiyun }
410*4882a593Smuzhiyun
411*4882a593Smuzhiyun return ret;
412*4882a593Smuzhiyun }
413