// SPDX-License-Identifier: GPL-2.0-only
/*
 * ppc64 code to implement the kexec_file_load syscall
 *
 * Copyright (C) 2004  Adam Litke (agl@us.ibm.com)
 * Copyright (C) 2004  IBM Corp.
 * Copyright (C) 2004,2005  Milton D Miller II, IBM Corporation
 * Copyright (C) 2005  R Sharada (sharada@in.ibm.com)
 * Copyright (C) 2006  Mohan Kumar M (mohan@in.ibm.com)
 * Copyright (C) 2020  IBM Corporation
 *
 * Based on kexec-tools' kexec-ppc64.c, kexec-elf-rel-ppc64.c, fs2dt.c.
 * Heavily modified for the kernel by
 * Hari Bathini, IBM Corporation.
 */

#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/of_device.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/setup.h>
#include <asm/drmem.h>
#include <asm/kexec_ranges.h>
#include <asm/crashdump-ppc64.h>

struct umem_info {
	u64 *buf;		/* data buffer for usable-memory property */
	u32 size;		/* size allocated for the data buffer */
	u32 max_entries;	/* maximum no. of entries */
	u32 idx;		/* index of current entry */

	/* usable memory ranges to look up */
	unsigned int nr_ranges;
	const struct crash_mem_range *ranges;
};
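
/*
 * Layout of umem_info::buf as built by the helpers below (values
 * illustrative): for linux,usable-memory it holds plain (base, size)
 * be64 pairs; for linux,drconf-usable-memory each LMB contributes a
 * be64 range count followed by that many (base, size) pairs, e.g.
 * { 1, base, size } for an LMB with a single usable range.
 */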

const struct kexec_file_ops * const kexec_file_loaders[] = {
	&kexec_elf64_ops,
	NULL
};

/**
 * get_exclude_memory_ranges - Get exclude memory ranges. This list includes
 *                             regions like opal/rtas, tce-table, initrd,
 *                             kernel, htab, which should be avoided while
 *                             setting up kexec load segments.
 * @mem_ranges:                Range list to add the memory ranges to.
 *
 * Returns 0 on success, negative errno on error.
 */
static int get_exclude_memory_ranges(struct crash_mem **mem_ranges)
{
	int ret;

	ret = add_tce_mem_ranges(mem_ranges);
	if (ret)
		goto out;

	ret = add_initrd_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_htab_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_kernel_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_rtas_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_opal_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_reserved_mem_ranges(mem_ranges);
	if (ret)
		goto out;

	/* exclude memory ranges should be sorted for easy lookup */
	sort_memory_ranges(*mem_ranges, true);
out:
	if (ret)
		pr_err("Failed to setup exclude memory ranges\n");
	return ret;
}

/**
 * get_usable_memory_ranges - Get usable memory ranges. This list includes
 *                            regions like crashkernel, opal/rtas & tce-table,
 *                            that the kdump kernel could use.
 * @mem_ranges:               Range list to add the memory ranges to.
 *
 * Returns 0 on success, negative errno on error.
 */
static int get_usable_memory_ranges(struct crash_mem **mem_ranges)
{
	int ret;

	/*
	 * Early boot failure observed on guests when low memory (first memory
	 * block?) is not added to usable memory. So, add [0, crashk_res.end]
	 * instead of [crashk_res.start, crashk_res.end] to work around it.
	 * Also, crashed kernel's memory must be added to the reserve map to
	 * prevent the kdump kernel from using it.
	 */
	ret = add_mem_range(mem_ranges, 0, crashk_res.end + 1);
	if (ret)
		goto out;

	ret = add_rtas_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_opal_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_tce_mem_ranges(mem_ranges);
out:
	if (ret)
		pr_err("Failed to setup usable memory ranges\n");
	return ret;
}

/**
 * get_crash_memory_ranges - Get crash memory ranges. This list includes
 *                           first/crashing kernel's memory regions that
 *                           would be exported via an elfcore.
 * @mem_ranges:              Range list to add the memory ranges to.
 *
 * Returns 0 on success, negative errno on error.
 */
static int get_crash_memory_ranges(struct crash_mem **mem_ranges)
{
	phys_addr_t base, end;
	struct crash_mem *tmem;
	u64 i;
	int ret;

	for_each_mem_range(i, &base, &end) {
		u64 size = end - base;

		/* Skip backup memory region, which needs a separate entry */
		if (base == BACKUP_SRC_START) {
			if (size > BACKUP_SRC_SIZE) {
				base = BACKUP_SRC_END + 1;
				size -= BACKUP_SRC_SIZE;
			} else
				continue;
		}

		ret = add_mem_range(mem_ranges, base, size);
		if (ret)
			goto out;

		/* Try merging adjacent ranges before reallocation attempt */
		if ((*mem_ranges)->nr_ranges == (*mem_ranges)->max_nr_ranges)
			sort_memory_ranges(*mem_ranges, true);
	}

	/* Reallocate memory ranges if there is no space to split ranges */
	tmem = *mem_ranges;
	if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) {
		tmem = realloc_mem_ranges(mem_ranges);
		if (!tmem) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Exclude crashkernel region */
	ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end);
	if (ret)
		goto out;

	/*
	 * FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL
	 *        regions are exported to save their context at the time of
	 *        crash, they should actually be backed up just like the
	 *        first 64K bytes of memory.
	 */
	ret = add_rtas_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_opal_mem_range(mem_ranges);
	if (ret)
		goto out;

	/* create a separate program header for the backup region */
	ret = add_mem_range(mem_ranges, BACKUP_SRC_START, BACKUP_SRC_SIZE);
	if (ret)
		goto out;

	sort_memory_ranges(*mem_ranges, false);
out:
	if (ret)
		pr_err("Failed to setup crash memory ranges\n");
	return ret;
}
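
/*
 * For instance (hypothetical values): with RAM at [0, 0x7fffffff] and
 * BACKUP_SRC_SIZE = 0x10000, the loop above records [0x10000, 0x7fffffff]
 * and the backup region [0, 0xffff] is appended as its own range, so the
 * elfcore gets a dedicated program header for the backed-up first 64KB.
 */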

/**
 * get_reserved_memory_ranges - Get reserved memory ranges. This list includes
 *                              memory regions that should be added to the
 *                              memory reserve map to ensure the region is
 *                              protected from any mischief.
 * @mem_ranges:                 Range list to add the memory ranges to.
 *
 * Returns 0 on success, negative errno on error.
 */
static int get_reserved_memory_ranges(struct crash_mem **mem_ranges)
{
	int ret;

	ret = add_rtas_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_tce_mem_ranges(mem_ranges);
	if (ret)
		goto out;

	ret = add_reserved_mem_ranges(mem_ranges);
out:
	if (ret)
		pr_err("Failed to setup reserved memory ranges\n");
	return ret;
}

/**
 * __locate_mem_hole_top_down - Looks top down for a large enough memory hole
 *                              in the memory regions between buf_min & buf_max
 *                              for the buffer. If found, sets kbuf->mem.
 * @kbuf:                       Buffer contents and memory parameters.
 * @buf_min:                    Minimum address for the buffer.
 * @buf_max:                    Maximum address for the buffer.
 *
 * Returns 0 on success, negative errno on error.
 */
static int __locate_mem_hole_top_down(struct kexec_buf *kbuf,
				      u64 buf_min, u64 buf_max)
{
	int ret = -EADDRNOTAVAIL;
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range_rev(i, &start, &end) {
		/*
		 * memblock uses [start, end) convention while it is
		 * [start, end] here. Fix the off-by-one to have the
		 * same convention.
		 */
		end -= 1;

		if (start > buf_max)
			continue;

		/* Memory hole not found */
		if (end < buf_min)
			break;

		/* Adjust memory region based on the given range */
		if (start < buf_min)
			start = buf_min;
		if (end > buf_max)
			end = buf_max;

		start = ALIGN(start, kbuf->buf_align);
		if (start < end && (end - start + 1) >= kbuf->memsz) {
			/* Suitable memory range found. Set kbuf->mem */
			kbuf->mem = ALIGN_DOWN(end - kbuf->memsz + 1,
					       kbuf->buf_align);
			ret = 0;
			break;
		}
	}

	return ret;
}
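
/*
 * Worked example for the placement above (hypothetical numbers): for a
 * region ending at 0x3fffffff with kbuf->memsz = 0x100000 and
 * kbuf->buf_align = 0x10000, the hole lands at
 * ALIGN_DOWN(0x3fffffff - 0x100000 + 1, 0x10000) = 0x3ff00000.
 */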

/**
 * locate_mem_hole_top_down_ppc64 - Skip special memory regions to find a
 *                                  suitable buffer with top down approach.
 * @kbuf:                           Buffer contents and memory parameters.
 * @buf_min:                        Minimum address for the buffer.
 * @buf_max:                        Maximum address for the buffer.
 * @emem:                           Exclude memory ranges.
 *
 * Returns 0 on success, negative errno on error.
 */
static int locate_mem_hole_top_down_ppc64(struct kexec_buf *kbuf,
					  u64 buf_min, u64 buf_max,
					  const struct crash_mem *emem)
{
	int i, ret = 0, err = -EADDRNOTAVAIL;
	u64 start, end, tmin, tmax;

	tmax = buf_max;
	for (i = (emem->nr_ranges - 1); i >= 0; i--) {
		start = emem->ranges[i].start;
		end = emem->ranges[i].end;

		if (start > tmax)
			continue;

		if (end < tmax) {
			tmin = (end < buf_min ? buf_min : end + 1);
			ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
			if (!ret)
				return 0;
		}

		tmax = start - 1;

		if (tmax < buf_min) {
			ret = err;
			break;
		}
		ret = 0;
	}

	if (!ret) {
		tmin = buf_min;
		ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
	}
	return ret;
}
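
/*
 * For instance (hypothetical values): with buf_min = 0x1000000,
 * buf_max = 0x7ffffff and one exclude range [0x4000000, 0x4ffffff], the
 * windows tried top-down are [0x5000000, 0x7ffffff], then
 * [0x1000000, 0x3ffffff]; the bottom-up variant below tries the same
 * windows in the opposite order.
 */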

/**
 * __locate_mem_hole_bottom_up - Looks bottom up for a large enough memory hole
 *                               in the memory regions between buf_min & buf_max
 *                               for the buffer. If found, sets kbuf->mem.
 * @kbuf:                        Buffer contents and memory parameters.
 * @buf_min:                     Minimum address for the buffer.
 * @buf_max:                     Maximum address for the buffer.
 *
 * Returns 0 on success, negative errno on error.
 */
static int __locate_mem_hole_bottom_up(struct kexec_buf *kbuf,
				       u64 buf_min, u64 buf_max)
{
	int ret = -EADDRNOTAVAIL;
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range(i, &start, &end) {
		/*
		 * memblock uses [start, end) convention while it is
		 * [start, end] here. Fix the off-by-one to have the
		 * same convention.
		 */
		end -= 1;

		if (end < buf_min)
			continue;

		/* Memory hole not found */
		if (start > buf_max)
			break;

		/* Adjust memory region based on the given range */
		if (start < buf_min)
			start = buf_min;
		if (end > buf_max)
			end = buf_max;

		start = ALIGN(start, kbuf->buf_align);
		if (start < end && (end - start + 1) >= kbuf->memsz) {
			/* Suitable memory range found. Set kbuf->mem */
			kbuf->mem = start;
			ret = 0;
			break;
		}
	}

	return ret;
}

/**
 * locate_mem_hole_bottom_up_ppc64 - Skip special memory regions to find a
 *                                   suitable buffer with bottom up approach.
 * @kbuf:                            Buffer contents and memory parameters.
 * @buf_min:                         Minimum address for the buffer.
 * @buf_max:                         Maximum address for the buffer.
 * @emem:                            Exclude memory ranges.
 *
 * Returns 0 on success, negative errno on error.
 */
static int locate_mem_hole_bottom_up_ppc64(struct kexec_buf *kbuf,
					   u64 buf_min, u64 buf_max,
					   const struct crash_mem *emem)
{
	int i, ret = 0, err = -EADDRNOTAVAIL;
	u64 start, end, tmin, tmax;

	tmin = buf_min;
	for (i = 0; i < emem->nr_ranges; i++) {
		start = emem->ranges[i].start;
		end = emem->ranges[i].end;

		if (end < tmin)
			continue;

		if (start > tmin) {
			tmax = (start > buf_max ? buf_max : start - 1);
			ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);
			if (!ret)
				return 0;
		}

		tmin = end + 1;

		if (tmin > buf_max) {
			ret = err;
			break;
		}
		ret = 0;
	}

	if (!ret) {
		tmax = buf_max;
		ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);
	}
	return ret;
}

/**
 * check_realloc_usable_mem - Reallocate buffer if it can't accommodate entries
 * @um_info:                  Usable memory buffer and ranges info.
 * @cnt:                      No. of entries to accommodate.
 *
 * On reallocation failure, the old buffer is left in place in @um_info->buf
 * for the caller to free.
 *
 * Returns buffer on success, NULL on error.
 */
static u64 *check_realloc_usable_mem(struct umem_info *um_info, int cnt)
{
	u32 new_size;
	u64 *tbuf;

	if ((um_info->idx + cnt) <= um_info->max_entries)
		return um_info->buf;

	new_size = um_info->size + MEM_RANGE_CHUNK_SZ;
	tbuf = krealloc(um_info->buf, new_size, GFP_KERNEL);
	if (tbuf) {
		um_info->buf = tbuf;
		um_info->size = new_size;
		um_info->max_entries = (um_info->size / sizeof(u64));
	}

	return tbuf;
}
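
/*
 * Note: growing by a single MEM_RANGE_CHUNK_SZ chunk is sufficient here
 * because callers in this file only ever request room for one or two
 * entries at a time.
 */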

/**
 * add_usable_mem - Add the usable memory ranges within the given memory range
 *                  to the buffer
 * @um_info:        Usable memory buffer and ranges info.
 * @base:           Base address of memory range to look for.
 * @end:            End address of memory range to look for.
 *
 * Returns 0 on success, negative errno on error.
 */
static int add_usable_mem(struct umem_info *um_info, u64 base, u64 end)
{
	u64 loc_base, loc_end;
	bool add;
	int i;

	for (i = 0; i < um_info->nr_ranges; i++) {
		add = false;
		loc_base = um_info->ranges[i].start;
		loc_end = um_info->ranges[i].end;
		if (loc_base >= base && loc_end <= end)
			add = true;
		else if (base < loc_end && end > loc_base) {
			if (loc_base < base)
				loc_base = base;
			if (loc_end > end)
				loc_end = end;
			add = true;
		}

		if (add) {
			if (!check_realloc_usable_mem(um_info, 2))
				return -ENOMEM;

			um_info->buf[um_info->idx++] = cpu_to_be64(loc_base);
			um_info->buf[um_info->idx++] =
					cpu_to_be64(loc_end - loc_base + 1);
		}
	}

	return 0;
}
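
/*
 * Clipping example (hypothetical values): a usable range
 * [0x1800, 0x2fff] intersected with a lookup window [0x1000, 0x1fff]
 * emits the pair (0x1800, 0x800), i.e. base 0x1800 and size 0x800.
 */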

/**
 * kdump_setup_usable_lmb - This is a callback function that gets called by
 *                          walk_drmem_lmbs for every LMB to set its
 *                          usable memory ranges.
 * @lmb:                    LMB info.
 * @usm:                    linux,drconf-usable-memory property value.
 * @data:                   Pointer to usable memory buffer and ranges info.
 *
 * Returns 0 on success, negative errno on error.
 */
static int kdump_setup_usable_lmb(struct drmem_lmb *lmb, const __be32 **usm,
				  void *data)
{
	struct umem_info *um_info;
	int tmp_idx, ret;
	u64 base, end;

	/*
	 * kdump load isn't supported on kernels already booted with
	 * the linux,drconf-usable-memory property.
	 */
	if (*usm) {
		pr_err("linux,drconf-usable-memory property already exists!\n");
		return -EINVAL;
	}

	um_info = data;
	tmp_idx = um_info->idx;
	if (!check_realloc_usable_mem(um_info, 1))
		return -ENOMEM;

	um_info->idx++;
	base = lmb->base_addr;
	end = base + drmem_lmb_size() - 1;
	ret = add_usable_mem(um_info, base, end);
	if (!ret) {
		/*
		 * Update the no. of ranges added. Two entries (base & size)
		 * for every range added.
		 */
		um_info->buf[tmp_idx] =
				cpu_to_be64((um_info->idx - tmp_idx - 1) / 2);
	}

	return ret;
}
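
/*
 * So an LMB with exactly one usable range ends up encoded as the triplet
 * { 1, base, size } in the linux,drconf-usable-memory property, and an
 * LMB with no usable range as just { 0 }.
 */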

#define NODE_PATH_LEN		256
/**
 * add_usable_mem_property - Add usable memory property for the given
 *                           memory node.
 * @fdt:                     Flattened device tree for the kdump kernel.
 * @dn:                      Memory node.
 * @um_info:                 Usable memory buffer and ranges info.
 *
 * Returns 0 on success, negative errno on error.
 */
static int add_usable_mem_property(void *fdt, struct device_node *dn,
				   struct umem_info *um_info)
{
	int n_mem_addr_cells, n_mem_size_cells, node;
	char path[NODE_PATH_LEN];
	int i, len, ranges, ret;
	const __be32 *prop;
	u64 base, end;

	of_node_get(dn);

	if (snprintf(path, NODE_PATH_LEN, "%pOF", dn) > (NODE_PATH_LEN - 1)) {
		pr_err("Buffer (%d) too small for memory node: %pOF\n",
		       NODE_PATH_LEN, dn);
		ret = -EOVERFLOW;
		goto out;
	}
	pr_debug("Memory node path: %s\n", path);

	/* Now that we know the path, find its offset in kdump kernel's fdt */
	node = fdt_path_offset(fdt, path);
	if (node < 0) {
		pr_err("Malformed device tree: error reading %s\n", path);
		ret = -EINVAL;
		goto out;
	}

	/* Get the address & size cells */
	n_mem_addr_cells = of_n_addr_cells(dn);
	n_mem_size_cells = of_n_size_cells(dn);
	pr_debug("address cells: %d, size cells: %d\n", n_mem_addr_cells,
		 n_mem_size_cells);

	um_info->idx = 0;
	if (!check_realloc_usable_mem(um_info, 2)) {
		ret = -ENOMEM;
		goto out;
	}

	prop = of_get_property(dn, "reg", &len);
	if (!prop || len <= 0) {
		ret = 0;
		goto out;
	}

	/*
	 * "reg" property represents a sequence of (addr, size) tuples, each
	 * representing a memory range.
	 */
	ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

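	/*
	 * E.g. (hypothetical): len is in bytes and each cell is 4 bytes, so
	 * with 2 address cells and 2 size cells a 32-byte "reg" property
	 * encodes (32 >> 2) / (2 + 2) = 2 memory ranges.
	 */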
	for (i = 0; i < ranges; i++) {
		base = of_read_number(prop, n_mem_addr_cells);
		prop += n_mem_addr_cells;
		end = base + of_read_number(prop, n_mem_size_cells) - 1;
		prop += n_mem_size_cells;

		ret = add_usable_mem(um_info, base, end);
		if (ret)
			goto out;
	}

	/*
	 * No kdump kernel usable memory found in this memory node.
	 * Write (0,0) tuple in linux,usable-memory property for
	 * this region to be ignored.
	 */
	if (um_info->idx == 0) {
		um_info->buf[0] = 0;
		um_info->buf[1] = 0;
		um_info->idx = 2;
	}

	ret = fdt_setprop(fdt, node, "linux,usable-memory", um_info->buf,
			  (um_info->idx * sizeof(u64)));

out:
	of_node_put(dn);
	return ret;
}

/**
 * update_usable_mem_fdt - Updates kdump kernel's fdt with linux,usable-memory
 *                         and linux,drconf-usable-memory DT properties as
 *                         appropriate to restrict its memory usage.
 * @fdt:                    Flattened device tree for the kdump kernel.
 * @usable_mem:             Usable memory ranges for kdump kernel.
 *
 * Returns 0 on success, negative errno on error.
 */
static int update_usable_mem_fdt(void *fdt, struct crash_mem *usable_mem)
{
	struct umem_info um_info;
	struct device_node *dn;
	int node, ret = 0;

	if (!usable_mem) {
		pr_err("Usable memory ranges for kdump kernel not found\n");
		return -ENOENT;
	}

	node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");
	if (node == -FDT_ERR_NOTFOUND)
		pr_debug("No dynamic reconfiguration memory found\n");
	else if (node < 0) {
		pr_err("Malformed device tree: error reading /ibm,dynamic-reconfiguration-memory.\n");
		return -EINVAL;
	}

	um_info.buf = NULL;
	um_info.size = 0;
	um_info.max_entries = 0;
	um_info.idx = 0;
	/* Memory ranges to look up */
	um_info.ranges = &(usable_mem->ranges[0]);
	um_info.nr_ranges = usable_mem->nr_ranges;

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (dn) {
		ret = walk_drmem_lmbs(dn, &um_info, kdump_setup_usable_lmb);
		of_node_put(dn);

		if (ret) {
			pr_err("Could not setup linux,drconf-usable-memory property for kdump\n");
			goto out;
		}

		ret = fdt_setprop(fdt, node, "linux,drconf-usable-memory",
				  um_info.buf, (um_info.idx * sizeof(u64)));
		if (ret) {
			pr_err("Failed to update fdt with linux,drconf-usable-memory property\n");
			goto out;
		}
	}

	/*
	 * Walk through each memory node and set linux,usable-memory property
	 * for the corresponding node in kdump kernel's fdt.
	 */
	for_each_node_by_type(dn, "memory") {
		ret = add_usable_mem_property(fdt, dn, &um_info);
		if (ret) {
			pr_err("Failed to set linux,usable-memory property for %s node\n",
			       dn->full_name);
			of_node_put(dn);
			goto out;
		}
	}

out:
	kfree(um_info.buf);
	return ret;
}

/**
 * load_backup_segment - Locate a memory hole to place the backup region.
 * @image:               Kexec image.
 * @kbuf:                Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
static int load_backup_segment(struct kimage *image, struct kexec_buf *kbuf)
{
	void *buf;
	int ret;

	/*
	 * Setup a source buffer for backup segment.
	 *
	 * A source buffer has no meaning for backup region as data will
	 * be copied from backup source, after crash, in the purgatory.
	 * But as load segment code doesn't recognize such segments,
	 * set up a dummy source buffer to keep it happy for now.
	 */
	buf = vzalloc(BACKUP_SRC_SIZE);
	if (!buf)
		return -ENOMEM;

	kbuf->buffer = buf;
	kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf->bufsz = kbuf->memsz = BACKUP_SRC_SIZE;
	kbuf->top_down = false;

	ret = kexec_add_buffer(kbuf);
	if (ret) {
		vfree(buf);
		return ret;
	}

	image->arch.backup_buf = buf;
	image->arch.backup_start = kbuf->mem;
	return 0;
}

/**
 * update_backup_region_phdr - Update backup region's offset for the core to
 *                             export the region appropriately.
 * @image:                     Kexec image.
 * @ehdr:                      ELF core header.
 *
 * Assumes an exclusive program header is set up for the backup region
 * in the ELF headers.
 *
 * Returns nothing.
 */
static void update_backup_region_phdr(struct kimage *image, Elf64_Ehdr *ehdr)
{
	Elf64_Phdr *phdr;
	unsigned int i;

	phdr = (Elf64_Phdr *)(ehdr + 1);
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
		if (phdr->p_paddr == BACKUP_SRC_START) {
			phdr->p_offset = image->arch.backup_start;
			pr_debug("Backup region offset updated to 0x%lx\n",
				 image->arch.backup_start);
			return;
		}
	}
}

/**
 * load_elfcorehdr_segment - Setup crash memory ranges and initialize elfcorehdr
 *                           segment needed to load kdump kernel.
 * @image:                   Kexec image.
 * @kbuf:                    Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
static int load_elfcorehdr_segment(struct kimage *image, struct kexec_buf *kbuf)
{
	struct crash_mem *cmem = NULL;
	unsigned long headers_sz;
	void *headers = NULL;
	int ret;

	ret = get_crash_memory_ranges(&cmem);
	if (ret)
		goto out;

	/* Setup elfcorehdr segment */
	ret = crash_prepare_elf64_headers(cmem, false, &headers, &headers_sz);
	if (ret) {
		pr_err("Failed to prepare elf headers for the core\n");
		goto out;
	}

	/* Fix the offset for backup region in the ELF header */
	update_backup_region_phdr(image, headers);

	kbuf->buffer = headers;
	kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf->bufsz = kbuf->memsz = headers_sz;
	kbuf->top_down = false;

	ret = kexec_add_buffer(kbuf);
	if (ret) {
		vfree(headers);
		goto out;
	}

	image->arch.elfcorehdr_addr = kbuf->mem;
	image->arch.elf_headers_sz = headers_sz;
	image->arch.elf_headers = headers;
out:
	kfree(cmem);
	return ret;
}

/**
 * load_crashdump_segments_ppc64 - Initialize the additional segments needed
 *                                 to load kdump kernel.
 * @image:                         Kexec image.
 * @kbuf:                          Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
int load_crashdump_segments_ppc64(struct kimage *image,
				  struct kexec_buf *kbuf)
{
	int ret;

	/* Load backup segment - first 64K bytes of the crashing kernel */
	ret = load_backup_segment(image, kbuf);
	if (ret) {
		pr_err("Failed to load backup segment\n");
		return ret;
	}
	pr_debug("Loaded the backup region at 0x%lx\n", kbuf->mem);

	/* Load elfcorehdr segment - to export crashing kernel's vmcore */
	ret = load_elfcorehdr_segment(image, kbuf);
	if (ret) {
		pr_err("Failed to load elfcorehdr segment\n");
		return ret;
	}
	pr_debug("Loaded elf core header at 0x%lx, bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elfcorehdr_addr, kbuf->bufsz, kbuf->memsz);

	return 0;
}

/**
 * setup_purgatory_ppc64 - initialize PPC64 specific purgatory's global
 *                         variables and call setup_purgatory() to initialize
 *                         common global variables.
 * @image:                 kexec image.
 * @slave_code:            Slave code for the purgatory.
 * @fdt:                   Flattened device tree for the next kernel.
 * @kernel_load_addr:      Address where the kernel is loaded.
 * @fdt_load_addr:         Address where the flattened device tree is loaded.
 *
 * Returns 0 on success, negative errno on error.
 */
int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
			  const void *fdt, unsigned long kernel_load_addr,
			  unsigned long fdt_load_addr)
{
	struct device_node *dn = NULL;
	int ret;

	ret = setup_purgatory(image, slave_code, fdt, kernel_load_addr,
			      fdt_load_addr);
	if (ret)
		goto out;

	if (image->type == KEXEC_TYPE_CRASH) {
		u32 my_run_at_load = 1;

		/*
		 * Tell relocatable kernel to run at load address
		 * via the word meant for that at 0x5c.
		 */
		ret = kexec_purgatory_get_set_symbol(image, "run_at_load",
						     &my_run_at_load,
						     sizeof(my_run_at_load),
						     false);
		if (ret)
			goto out;
	}

	/* Tell purgatory where to look for backup region */
	ret = kexec_purgatory_get_set_symbol(image, "backup_start",
					     &image->arch.backup_start,
					     sizeof(image->arch.backup_start),
					     false);
	if (ret)
		goto out;

	/* Setup OPAL base & entry values */
	dn = of_find_node_by_path("/ibm,opal");
	if (dn) {
		u64 val;

		of_property_read_u64(dn, "opal-base-address", &val);
		ret = kexec_purgatory_get_set_symbol(image, "opal_base", &val,
						     sizeof(val), false);
		if (ret)
			goto out;

		of_property_read_u64(dn, "opal-entry-address", &val);
		ret = kexec_purgatory_get_set_symbol(image, "opal_entry", &val,
						     sizeof(val), false);
	}
out:
	if (ret)
		pr_err("Failed to setup purgatory symbols\n");
	of_node_put(dn);
	return ret;
}

/**
 * kexec_fdt_totalsize_ppc64 - Return the estimated size needed to setup FDT
 *                             for kexec/kdump kernel.
 * @image:                     kexec image being loaded.
 *
 * Returns the estimated size needed for kexec/kdump kernel FDT.
 */
unsigned int kexec_fdt_totalsize_ppc64(struct kimage *image)
{
	unsigned int fdt_size;
	u64 usm_entries;

	/*
	 * The below estimate more than accounts for a typical kexec case where
	 * the additional space is to accommodate things like kexec cmdline,
	 * chosen node with properties for initrd start & end addresses and
	 * a property to indicate kexec boot.
	 */
	fdt_size = fdt_totalsize(initial_boot_params) + (2 * COMMAND_LINE_SIZE);
	if (image->type != KEXEC_TYPE_CRASH)
		return fdt_size;

	/*
	 * For kdump kernel, also account for linux,usable-memory and
	 * linux,drconf-usable-memory properties. Get an approximate on the
	 * number of usable memory entries and use for FDT size estimation.
	 */
	usm_entries = ((memblock_end_of_DRAM() / drmem_lmb_size()) +
		       (2 * (resource_size(&crashk_res) / drmem_lmb_size())));
	fdt_size += (unsigned int)(usm_entries * sizeof(u64));

	return fdt_size;
}
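
/*
 * Plugging hypothetical numbers into the estimate above: 16GB of DRAM
 * with a 256MB LMB size and a 1GB crashkernel region gives
 * usm_entries = 64 + 2 * 4 = 72, i.e. 576 extra bytes of FDT space.
 */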

/**
 * add_node_props - Reads node properties from device node structure and adds
 *                  them to fdt.
 * @fdt:            Flattened device tree of the kernel
 * @node_offset:    offset of the node to add a property at
 * @dn:             device node pointer
 *
 * Returns 0 on success, negative errno on error.
 */
static int add_node_props(void *fdt, int node_offset, const struct device_node *dn)
{
	int ret = 0;
	struct property *pp;

	if (!dn)
		return -EINVAL;

	for_each_property_of_node(dn, pp) {
		ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length);
		if (ret < 0) {
			pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret));
			return ret;
		}
	}
	return ret;
}

/**
 * update_cpus_node - Update cpus node of flattened device tree using of_root
 *                    device node.
 * @fdt:              Flattened device tree of the kernel.
 *
 * Returns 0 on success, negative errno on error.
 */
static int update_cpus_node(void *fdt)
{
	struct device_node *cpus_node, *dn;
	int cpus_offset, cpus_subnode_offset, ret = 0;

	cpus_offset = fdt_path_offset(fdt, "/cpus");
	if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) {
		pr_err("Malformed device tree: error reading /cpus node: %s\n",
		       fdt_strerror(cpus_offset));
		return cpus_offset;
	}

	if (cpus_offset > 0) {
		ret = fdt_del_node(fdt, cpus_offset);
		if (ret < 0) {
			pr_err("Error deleting /cpus node: %s\n", fdt_strerror(ret));
			return -EINVAL;
		}
	}

	/* Add cpus node to fdt */
	cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), "cpus");
	if (cpus_offset < 0) {
		pr_err("Error creating /cpus node: %s\n", fdt_strerror(cpus_offset));
		return -EINVAL;
	}

	/* Add cpus node properties */
	cpus_node = of_find_node_by_path("/cpus");
	ret = add_node_props(fdt, cpus_offset, cpus_node);
	of_node_put(cpus_node);
	if (ret < 0)
		return ret;

	/* Loop through all subnodes of cpus and add them to fdt */
	for_each_node_by_type(dn, "cpu") {
		cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name);
		if (cpus_subnode_offset < 0) {
			pr_err("Unable to add %s subnode: %s\n", dn->full_name,
			       fdt_strerror(cpus_subnode_offset));
			ret = cpus_subnode_offset;
			goto out;
		}

		ret = add_node_props(fdt, cpus_subnode_offset, dn);
		if (ret < 0)
			goto out;
	}
out:
	of_node_put(dn);
	return ret;
}

/**
 * setup_new_fdt_ppc64 - Update the flattened device-tree of the kernel
 *                       being loaded.
 * @image:               kexec image being loaded.
 * @fdt:                 Flattened device tree for the next kernel.
 * @initrd_load_addr:    Address where the next initrd will be loaded.
 * @initrd_len:          Size of the next initrd, or 0 if there will be none.
 * @cmdline:             Command line for the next kernel, or NULL if there will
 *                       be none.
 *
 * Returns 0 on success, negative errno on error.
 */
int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
			unsigned long initrd_load_addr,
			unsigned long initrd_len, const char *cmdline)
{
	struct crash_mem *umem = NULL, *rmem = NULL;
	int i, nr_ranges, ret;

	ret = setup_new_fdt(image, fdt, initrd_load_addr, initrd_len, cmdline);
	if (ret)
		goto out;

	/*
	 * Restrict memory usage for kdump kernel by setting up
	 * usable memory ranges and memory reserve map.
	 */
	if (image->type == KEXEC_TYPE_CRASH) {
		ret = get_usable_memory_ranges(&umem);
		if (ret)
			goto out;

		ret = update_usable_mem_fdt(fdt, umem);
		if (ret) {
			pr_err("Error setting up usable-memory property for kdump kernel\n");
			goto out;
		}

		/*
		 * Ensure we don't touch crashed kernel's memory except the
		 * first 64K of RAM, which will be backed up.
		 */
		ret = fdt_add_mem_rsv(fdt, BACKUP_SRC_END + 1,
				      crashk_res.start - BACKUP_SRC_SIZE);
		if (ret) {
			pr_err("Error reserving crash memory: %s\n",
			       fdt_strerror(ret));
			goto out;
		}

		/* Ensure backup region is not used by kdump/capture kernel */
		ret = fdt_add_mem_rsv(fdt, image->arch.backup_start,
				      BACKUP_SRC_SIZE);
		if (ret) {
			pr_err("Error reserving memory for backup: %s\n",
			       fdt_strerror(ret));
			goto out;
		}
	}

	/* Update cpus nodes information to account for hotplugged CPUs. */
	ret = update_cpus_node(fdt);
	if (ret < 0)
		goto out;

	/* Update memory reserve map */
	ret = get_reserved_memory_ranges(&rmem);
	if (ret)
		goto out;

	nr_ranges = rmem ? rmem->nr_ranges : 0;
	for (i = 0; i < nr_ranges; i++) {
		u64 base, size;

		base = rmem->ranges[i].start;
		size = rmem->ranges[i].end - base + 1;
		ret = fdt_add_mem_rsv(fdt, base, size);
		if (ret) {
			pr_err("Error updating memory reserve map: %s\n",
			       fdt_strerror(ret));
			goto out;
		}
	}

out:
	kfree(rmem);
	kfree(umem);
	return ret;
}
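
/*
 * Example of the crash memory reservation above (hypothetical values):
 * with crashk_res.start = 0x20000000 and BACKUP_SRC_SIZE = 0x10000, the
 * first reserve entry covers [0x10000, 0x20000000), i.e. all of the
 * crashed kernel's memory below the crashkernel region except the
 * backed-up first 64KB.
 */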

/**
 * arch_kexec_locate_mem_hole - Skip special memory regions like rtas, opal,
 *                              tce-table, reserved-ranges & such (exclude
 *                              memory ranges) as they can't be used for kexec
 *                              segment buffer. Sets kbuf->mem when a suitable
 *                              memory hole is found.
 * @kbuf:                       Buffer contents and memory parameters.
 *
 * Assumes minimum of PAGE_SIZE alignment for kbuf->memsz & kbuf->buf_align.
 *
 * Returns 0 on success, negative errno on error.
 */
int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
{
	struct crash_mem **emem;
	u64 buf_min, buf_max;
	int ret;

	/* Look up the exclude ranges list while locating the memory hole */
	emem = &(kbuf->image->arch.exclude_ranges);
	if (!(*emem) || ((*emem)->nr_ranges == 0)) {
		pr_warn("No exclude range list. Using the default locate mem hole method\n");
		return kexec_locate_mem_hole(kbuf);
	}

	buf_min = kbuf->buf_min;
	buf_max = kbuf->buf_max;
	/* Segments for kdump kernel should be within crashkernel region */
	if (kbuf->image->type == KEXEC_TYPE_CRASH) {
		buf_min = (buf_min < crashk_res.start ?
			   crashk_res.start : buf_min);
		buf_max = (buf_max > crashk_res.end ?
			   crashk_res.end : buf_max);
	}

	if (buf_min > buf_max) {
		pr_err("Invalid buffer min and/or max values\n");
		return -EINVAL;
	}

	if (kbuf->top_down)
		ret = locate_mem_hole_top_down_ppc64(kbuf, buf_min, buf_max,
						     *emem);
	else
		ret = locate_mem_hole_bottom_up_ppc64(kbuf, buf_min, buf_max,
						      *emem);

	/* Add the buffer allocated to the exclude list for the next lookup */
	if (!ret) {
		add_mem_range(emem, kbuf->mem, kbuf->memsz);
		sort_memory_ranges(*emem, true);
	} else {
		pr_err("Failed to locate memory buffer of size %lu\n",
		       kbuf->memsz);
	}
	return ret;
}

/**
 * arch_kexec_kernel_image_probe - Does additional handling needed to setup
 *                                 kexec segments.
 * @image:                         kexec image being loaded.
 * @buf:                           Buffer pointing to elf data.
 * @buf_len:                       Length of the buffer.
 *
 * Returns 0 on success, negative errno on error.
 */
int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
				  unsigned long buf_len)
{
	int ret;

	/* Get exclude memory ranges needed for setting up kexec segments */
	ret = get_exclude_memory_ranges(&(image->arch.exclude_ranges));
	if (ret) {
		pr_err("Failed to setup exclude memory ranges for buffer lookup\n");
		return ret;
	}

	return kexec_image_probe_default(image, buf, buf_len);
}

/**
 * arch_kimage_file_post_load_cleanup - Frees up all the allocations done
 *                                      while loading the image.
 * @image:                              kexec image being loaded.
 *
 * Returns 0 on success, negative errno on error.
 */
int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	kfree(image->arch.exclude_ranges);
	image->arch.exclude_ranges = NULL;

	vfree(image->arch.backup_buf);
	image->arch.backup_buf = NULL;

	vfree(image->arch.elf_headers);
	image->arch.elf_headers = NULL;
	image->arch.elf_headers_sz = 0;

	return kexec_image_post_load_cleanup_default(image);
}