1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Firmware-Assisted Dump support on POWER platform (OPAL).
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright 2019, Hari Bathini, IBM Corporation.
6*4882a593Smuzhiyun */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #define pr_fmt(fmt) "opal fadump: " fmt
9*4882a593Smuzhiyun
10*4882a593Smuzhiyun #include <linux/string.h>
11*4882a593Smuzhiyun #include <linux/seq_file.h>
12*4882a593Smuzhiyun #include <linux/of.h>
13*4882a593Smuzhiyun #include <linux/of_fdt.h>
14*4882a593Smuzhiyun #include <linux/libfdt.h>
15*4882a593Smuzhiyun #include <linux/mm.h>
16*4882a593Smuzhiyun #include <linux/crash_dump.h>
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun #include <asm/page.h>
19*4882a593Smuzhiyun #include <asm/opal.h>
20*4882a593Smuzhiyun #include <asm/fadump-internal.h>
21*4882a593Smuzhiyun
22*4882a593Smuzhiyun #include "opal-fadump.h"
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun
25*4882a593Smuzhiyun #ifdef CONFIG_PRESERVE_FA_DUMP
26*4882a593Smuzhiyun /*
27*4882a593Smuzhiyun * When dump is active but PRESERVE_FA_DUMP is enabled on the kernel,
28*4882a593Smuzhiyun * ensure crash data is preserved in hope that the subsequent memory
29*4882a593Smuzhiyun * preserving kernel boot is going to process this crash data.
30*4882a593Smuzhiyun */
opal_fadump_dt_scan(struct fw_dump * fadump_conf,u64 node)31*4882a593Smuzhiyun void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
32*4882a593Smuzhiyun {
33*4882a593Smuzhiyun const struct opal_fadump_mem_struct *opal_fdm_active;
34*4882a593Smuzhiyun const __be32 *prop;
35*4882a593Smuzhiyun unsigned long dn;
36*4882a593Smuzhiyun u64 addr = 0;
37*4882a593Smuzhiyun s64 ret;
38*4882a593Smuzhiyun
39*4882a593Smuzhiyun dn = of_get_flat_dt_subnode_by_name(node, "dump");
40*4882a593Smuzhiyun if (dn == -FDT_ERR_NOTFOUND)
41*4882a593Smuzhiyun return;
42*4882a593Smuzhiyun
43*4882a593Smuzhiyun /*
44*4882a593Smuzhiyun * Check if dump has been initiated on last reboot.
45*4882a593Smuzhiyun */
46*4882a593Smuzhiyun prop = of_get_flat_dt_prop(dn, "mpipl-boot", NULL);
47*4882a593Smuzhiyun if (!prop)
48*4882a593Smuzhiyun return;
49*4882a593Smuzhiyun
50*4882a593Smuzhiyun ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_KERNEL, &addr);
51*4882a593Smuzhiyun if ((ret != OPAL_SUCCESS) || !addr) {
52*4882a593Smuzhiyun pr_debug("Could not get Kernel metadata (%lld)\n", ret);
53*4882a593Smuzhiyun return;
54*4882a593Smuzhiyun }
55*4882a593Smuzhiyun
56*4882a593Smuzhiyun /*
57*4882a593Smuzhiyun * Preserve memory only if kernel memory regions are registered
58*4882a593Smuzhiyun * with f/w for MPIPL.
59*4882a593Smuzhiyun */
60*4882a593Smuzhiyun addr = be64_to_cpu(addr);
61*4882a593Smuzhiyun pr_debug("Kernel metadata addr: %llx\n", addr);
62*4882a593Smuzhiyun opal_fdm_active = (void *)addr;
63*4882a593Smuzhiyun if (be16_to_cpu(opal_fdm_active->registered_regions) == 0)
64*4882a593Smuzhiyun return;
65*4882a593Smuzhiyun
66*4882a593Smuzhiyun ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_BOOT_MEM, &addr);
67*4882a593Smuzhiyun if ((ret != OPAL_SUCCESS) || !addr) {
68*4882a593Smuzhiyun pr_err("Failed to get boot memory tag (%lld)\n", ret);
69*4882a593Smuzhiyun return;
70*4882a593Smuzhiyun }
71*4882a593Smuzhiyun
72*4882a593Smuzhiyun /*
73*4882a593Smuzhiyun * Memory below this address can be used for booting a
74*4882a593Smuzhiyun * capture kernel or petitboot kernel. Preserve everything
75*4882a593Smuzhiyun * above this address for processing crashdump.
76*4882a593Smuzhiyun */
77*4882a593Smuzhiyun fadump_conf->boot_mem_top = be64_to_cpu(addr);
78*4882a593Smuzhiyun pr_debug("Preserve everything above %llx\n", fadump_conf->boot_mem_top);
79*4882a593Smuzhiyun
80*4882a593Smuzhiyun pr_info("Firmware-assisted dump is active.\n");
81*4882a593Smuzhiyun fadump_conf->dump_active = 1;
82*4882a593Smuzhiyun }
83*4882a593Smuzhiyun
84*4882a593Smuzhiyun #else /* CONFIG_PRESERVE_FA_DUMP */
85*4882a593Smuzhiyun static const struct opal_fadump_mem_struct *opal_fdm_active;
86*4882a593Smuzhiyun static const struct opal_mpipl_fadump *opal_cpu_metadata;
87*4882a593Smuzhiyun static struct opal_fadump_mem_struct *opal_fdm;
88*4882a593Smuzhiyun
89*4882a593Smuzhiyun #ifdef CONFIG_OPAL_CORE
90*4882a593Smuzhiyun extern bool kernel_initiated;
91*4882a593Smuzhiyun #endif
92*4882a593Smuzhiyun
93*4882a593Smuzhiyun static int opal_fadump_unregister(struct fw_dump *fadump_conf);
94*4882a593Smuzhiyun
opal_fadump_update_config(struct fw_dump * fadump_conf,const struct opal_fadump_mem_struct * fdm)95*4882a593Smuzhiyun static void opal_fadump_update_config(struct fw_dump *fadump_conf,
96*4882a593Smuzhiyun const struct opal_fadump_mem_struct *fdm)
97*4882a593Smuzhiyun {
98*4882a593Smuzhiyun pr_debug("Boot memory regions count: %d\n", be16_to_cpu(fdm->region_cnt));
99*4882a593Smuzhiyun
100*4882a593Smuzhiyun /*
101*4882a593Smuzhiyun * The destination address of the first boot memory region is the
102*4882a593Smuzhiyun * destination address of boot memory regions.
103*4882a593Smuzhiyun */
104*4882a593Smuzhiyun fadump_conf->boot_mem_dest_addr = be64_to_cpu(fdm->rgn[0].dest);
105*4882a593Smuzhiyun pr_debug("Destination address of boot memory regions: %#016llx\n",
106*4882a593Smuzhiyun fadump_conf->boot_mem_dest_addr);
107*4882a593Smuzhiyun
108*4882a593Smuzhiyun fadump_conf->fadumphdr_addr = be64_to_cpu(fdm->fadumphdr_addr);
109*4882a593Smuzhiyun }
110*4882a593Smuzhiyun
111*4882a593Smuzhiyun /*
112*4882a593Smuzhiyun * This function is called in the capture kernel to get configuration details
113*4882a593Smuzhiyun * from metadata setup by the first kernel.
114*4882a593Smuzhiyun */
opal_fadump_get_config(struct fw_dump * fadump_conf,const struct opal_fadump_mem_struct * fdm)115*4882a593Smuzhiyun static void opal_fadump_get_config(struct fw_dump *fadump_conf,
116*4882a593Smuzhiyun const struct opal_fadump_mem_struct *fdm)
117*4882a593Smuzhiyun {
118*4882a593Smuzhiyun unsigned long base, size, last_end, hole_size;
119*4882a593Smuzhiyun int i;
120*4882a593Smuzhiyun
121*4882a593Smuzhiyun if (!fadump_conf->dump_active)
122*4882a593Smuzhiyun return;
123*4882a593Smuzhiyun
124*4882a593Smuzhiyun last_end = 0;
125*4882a593Smuzhiyun hole_size = 0;
126*4882a593Smuzhiyun fadump_conf->boot_memory_size = 0;
127*4882a593Smuzhiyun
128*4882a593Smuzhiyun pr_debug("Boot memory regions:\n");
129*4882a593Smuzhiyun for (i = 0; i < be16_to_cpu(fdm->region_cnt); i++) {
130*4882a593Smuzhiyun base = be64_to_cpu(fdm->rgn[i].src);
131*4882a593Smuzhiyun size = be64_to_cpu(fdm->rgn[i].size);
132*4882a593Smuzhiyun pr_debug("\t[%03d] base: 0x%lx, size: 0x%lx\n", i, base, size);
133*4882a593Smuzhiyun
134*4882a593Smuzhiyun fadump_conf->boot_mem_addr[i] = base;
135*4882a593Smuzhiyun fadump_conf->boot_mem_sz[i] = size;
136*4882a593Smuzhiyun fadump_conf->boot_memory_size += size;
137*4882a593Smuzhiyun hole_size += (base - last_end);
138*4882a593Smuzhiyun
139*4882a593Smuzhiyun last_end = base + size;
140*4882a593Smuzhiyun }
141*4882a593Smuzhiyun
142*4882a593Smuzhiyun /*
143*4882a593Smuzhiyun * Start address of reserve dump area (permanent reservation) for
144*4882a593Smuzhiyun * re-registering FADump after dump capture.
145*4882a593Smuzhiyun */
146*4882a593Smuzhiyun fadump_conf->reserve_dump_area_start = be64_to_cpu(fdm->rgn[0].dest);
147*4882a593Smuzhiyun
148*4882a593Smuzhiyun /*
149*4882a593Smuzhiyun * Rarely, but it can so happen that system crashes before all
150*4882a593Smuzhiyun * boot memory regions are registered for MPIPL. In such
151*4882a593Smuzhiyun * cases, warn that the vmcore may not be accurate and proceed
152*4882a593Smuzhiyun * anyway as that is the best bet considering free pages, cache
153*4882a593Smuzhiyun * pages, user pages, etc are usually filtered out.
154*4882a593Smuzhiyun *
155*4882a593Smuzhiyun * Hope the memory that could not be preserved only has pages
156*4882a593Smuzhiyun * that are usually filtered out while saving the vmcore.
157*4882a593Smuzhiyun */
158*4882a593Smuzhiyun if (be16_to_cpu(fdm->region_cnt) > be16_to_cpu(fdm->registered_regions)) {
159*4882a593Smuzhiyun pr_warn("Not all memory regions were saved!!!\n");
160*4882a593Smuzhiyun pr_warn(" Unsaved memory regions:\n");
161*4882a593Smuzhiyun i = be16_to_cpu(fdm->registered_regions);
162*4882a593Smuzhiyun while (i < be16_to_cpu(fdm->region_cnt)) {
163*4882a593Smuzhiyun pr_warn("\t[%03d] base: 0x%llx, size: 0x%llx\n",
164*4882a593Smuzhiyun i, be64_to_cpu(fdm->rgn[i].src),
165*4882a593Smuzhiyun be64_to_cpu(fdm->rgn[i].size));
166*4882a593Smuzhiyun i++;
167*4882a593Smuzhiyun }
168*4882a593Smuzhiyun
169*4882a593Smuzhiyun pr_warn("If the unsaved regions only contain pages that are filtered out (eg. free/user pages), the vmcore should still be usable.\n");
170*4882a593Smuzhiyun pr_warn("WARNING: If the unsaved regions contain kernel pages, the vmcore will be corrupted.\n");
171*4882a593Smuzhiyun }
172*4882a593Smuzhiyun
173*4882a593Smuzhiyun fadump_conf->boot_mem_top = (fadump_conf->boot_memory_size + hole_size);
174*4882a593Smuzhiyun fadump_conf->boot_mem_regs_cnt = be16_to_cpu(fdm->region_cnt);
175*4882a593Smuzhiyun opal_fadump_update_config(fadump_conf, fdm);
176*4882a593Smuzhiyun }
177*4882a593Smuzhiyun
178*4882a593Smuzhiyun /* Initialize kernel metadata */
opal_fadump_init_metadata(struct opal_fadump_mem_struct * fdm)179*4882a593Smuzhiyun static void opal_fadump_init_metadata(struct opal_fadump_mem_struct *fdm)
180*4882a593Smuzhiyun {
181*4882a593Smuzhiyun fdm->version = OPAL_FADUMP_VERSION;
182*4882a593Smuzhiyun fdm->region_cnt = cpu_to_be16(0);
183*4882a593Smuzhiyun fdm->registered_regions = cpu_to_be16(0);
184*4882a593Smuzhiyun fdm->fadumphdr_addr = cpu_to_be64(0);
185*4882a593Smuzhiyun }
186*4882a593Smuzhiyun
opal_fadump_init_mem_struct(struct fw_dump * fadump_conf)187*4882a593Smuzhiyun static u64 opal_fadump_init_mem_struct(struct fw_dump *fadump_conf)
188*4882a593Smuzhiyun {
189*4882a593Smuzhiyun u64 addr = fadump_conf->reserve_dump_area_start;
190*4882a593Smuzhiyun u16 reg_cnt;
191*4882a593Smuzhiyun int i;
192*4882a593Smuzhiyun
193*4882a593Smuzhiyun opal_fdm = __va(fadump_conf->kernel_metadata);
194*4882a593Smuzhiyun opal_fadump_init_metadata(opal_fdm);
195*4882a593Smuzhiyun
196*4882a593Smuzhiyun /* Boot memory regions */
197*4882a593Smuzhiyun reg_cnt = be16_to_cpu(opal_fdm->region_cnt);
198*4882a593Smuzhiyun for (i = 0; i < fadump_conf->boot_mem_regs_cnt; i++) {
199*4882a593Smuzhiyun opal_fdm->rgn[i].src = cpu_to_be64(fadump_conf->boot_mem_addr[i]);
200*4882a593Smuzhiyun opal_fdm->rgn[i].dest = cpu_to_be64(addr);
201*4882a593Smuzhiyun opal_fdm->rgn[i].size = cpu_to_be64(fadump_conf->boot_mem_sz[i]);
202*4882a593Smuzhiyun
203*4882a593Smuzhiyun reg_cnt++;
204*4882a593Smuzhiyun addr += fadump_conf->boot_mem_sz[i];
205*4882a593Smuzhiyun }
206*4882a593Smuzhiyun opal_fdm->region_cnt = cpu_to_be16(reg_cnt);
207*4882a593Smuzhiyun
208*4882a593Smuzhiyun /*
209*4882a593Smuzhiyun * Kernel metadata is passed to f/w and retrieved in capture kerenl.
210*4882a593Smuzhiyun * So, use it to save fadump header address instead of calculating it.
211*4882a593Smuzhiyun */
212*4882a593Smuzhiyun opal_fdm->fadumphdr_addr = cpu_to_be64(be64_to_cpu(opal_fdm->rgn[0].dest) +
213*4882a593Smuzhiyun fadump_conf->boot_memory_size);
214*4882a593Smuzhiyun
215*4882a593Smuzhiyun opal_fadump_update_config(fadump_conf, opal_fdm);
216*4882a593Smuzhiyun
217*4882a593Smuzhiyun return addr;
218*4882a593Smuzhiyun }
219*4882a593Smuzhiyun
opal_fadump_get_metadata_size(void)220*4882a593Smuzhiyun static u64 opal_fadump_get_metadata_size(void)
221*4882a593Smuzhiyun {
222*4882a593Smuzhiyun return PAGE_ALIGN(sizeof(struct opal_fadump_mem_struct));
223*4882a593Smuzhiyun }
224*4882a593Smuzhiyun
opal_fadump_setup_metadata(struct fw_dump * fadump_conf)225*4882a593Smuzhiyun static int opal_fadump_setup_metadata(struct fw_dump *fadump_conf)
226*4882a593Smuzhiyun {
227*4882a593Smuzhiyun int err = 0;
228*4882a593Smuzhiyun s64 ret;
229*4882a593Smuzhiyun
230*4882a593Smuzhiyun /*
231*4882a593Smuzhiyun * Use the last page(s) in FADump memory reservation for
232*4882a593Smuzhiyun * kernel metadata.
233*4882a593Smuzhiyun */
234*4882a593Smuzhiyun fadump_conf->kernel_metadata = (fadump_conf->reserve_dump_area_start +
235*4882a593Smuzhiyun fadump_conf->reserve_dump_area_size -
236*4882a593Smuzhiyun opal_fadump_get_metadata_size());
237*4882a593Smuzhiyun pr_info("Kernel metadata addr: %llx\n", fadump_conf->kernel_metadata);
238*4882a593Smuzhiyun
239*4882a593Smuzhiyun /* Initialize kernel metadata before registering the address with f/w */
240*4882a593Smuzhiyun opal_fdm = __va(fadump_conf->kernel_metadata);
241*4882a593Smuzhiyun opal_fadump_init_metadata(opal_fdm);
242*4882a593Smuzhiyun
243*4882a593Smuzhiyun /*
244*4882a593Smuzhiyun * Register metadata address with f/w. Can be retrieved in
245*4882a593Smuzhiyun * the capture kernel.
246*4882a593Smuzhiyun */
247*4882a593Smuzhiyun ret = opal_mpipl_register_tag(OPAL_MPIPL_TAG_KERNEL,
248*4882a593Smuzhiyun fadump_conf->kernel_metadata);
249*4882a593Smuzhiyun if (ret != OPAL_SUCCESS) {
250*4882a593Smuzhiyun pr_err("Failed to set kernel metadata tag!\n");
251*4882a593Smuzhiyun err = -EPERM;
252*4882a593Smuzhiyun }
253*4882a593Smuzhiyun
254*4882a593Smuzhiyun /*
255*4882a593Smuzhiyun * Register boot memory top address with f/w. Should be retrieved
256*4882a593Smuzhiyun * by a kernel that intends to preserve crash'ed kernel's memory.
257*4882a593Smuzhiyun */
258*4882a593Smuzhiyun ret = opal_mpipl_register_tag(OPAL_MPIPL_TAG_BOOT_MEM,
259*4882a593Smuzhiyun fadump_conf->boot_mem_top);
260*4882a593Smuzhiyun if (ret != OPAL_SUCCESS) {
261*4882a593Smuzhiyun pr_err("Failed to set boot memory tag!\n");
262*4882a593Smuzhiyun err = -EPERM;
263*4882a593Smuzhiyun }
264*4882a593Smuzhiyun
265*4882a593Smuzhiyun return err;
266*4882a593Smuzhiyun }
267*4882a593Smuzhiyun
opal_fadump_get_bootmem_min(void)268*4882a593Smuzhiyun static u64 opal_fadump_get_bootmem_min(void)
269*4882a593Smuzhiyun {
270*4882a593Smuzhiyun return OPAL_FADUMP_MIN_BOOT_MEM;
271*4882a593Smuzhiyun }
272*4882a593Smuzhiyun
opal_fadump_register(struct fw_dump * fadump_conf)273*4882a593Smuzhiyun static int opal_fadump_register(struct fw_dump *fadump_conf)
274*4882a593Smuzhiyun {
275*4882a593Smuzhiyun s64 rc = OPAL_PARAMETER;
276*4882a593Smuzhiyun u16 registered_regs;
277*4882a593Smuzhiyun int i, err = -EIO;
278*4882a593Smuzhiyun
279*4882a593Smuzhiyun registered_regs = be16_to_cpu(opal_fdm->registered_regions);
280*4882a593Smuzhiyun for (i = 0; i < be16_to_cpu(opal_fdm->region_cnt); i++) {
281*4882a593Smuzhiyun rc = opal_mpipl_update(OPAL_MPIPL_ADD_RANGE,
282*4882a593Smuzhiyun be64_to_cpu(opal_fdm->rgn[i].src),
283*4882a593Smuzhiyun be64_to_cpu(opal_fdm->rgn[i].dest),
284*4882a593Smuzhiyun be64_to_cpu(opal_fdm->rgn[i].size));
285*4882a593Smuzhiyun if (rc != OPAL_SUCCESS)
286*4882a593Smuzhiyun break;
287*4882a593Smuzhiyun
288*4882a593Smuzhiyun registered_regs++;
289*4882a593Smuzhiyun }
290*4882a593Smuzhiyun opal_fdm->registered_regions = cpu_to_be16(registered_regs);
291*4882a593Smuzhiyun
292*4882a593Smuzhiyun switch (rc) {
293*4882a593Smuzhiyun case OPAL_SUCCESS:
294*4882a593Smuzhiyun pr_info("Registration is successful!\n");
295*4882a593Smuzhiyun fadump_conf->dump_registered = 1;
296*4882a593Smuzhiyun err = 0;
297*4882a593Smuzhiyun break;
298*4882a593Smuzhiyun case OPAL_RESOURCE:
299*4882a593Smuzhiyun /* If MAX regions limit in f/w is hit, warn and proceed. */
300*4882a593Smuzhiyun pr_warn("%d regions could not be registered for MPIPL as MAX limit is reached!\n",
301*4882a593Smuzhiyun (be16_to_cpu(opal_fdm->region_cnt) -
302*4882a593Smuzhiyun be16_to_cpu(opal_fdm->registered_regions)));
303*4882a593Smuzhiyun fadump_conf->dump_registered = 1;
304*4882a593Smuzhiyun err = 0;
305*4882a593Smuzhiyun break;
306*4882a593Smuzhiyun case OPAL_PARAMETER:
307*4882a593Smuzhiyun pr_err("Failed to register. Parameter Error(%lld).\n", rc);
308*4882a593Smuzhiyun break;
309*4882a593Smuzhiyun case OPAL_HARDWARE:
310*4882a593Smuzhiyun pr_err("Support not available.\n");
311*4882a593Smuzhiyun fadump_conf->fadump_supported = 0;
312*4882a593Smuzhiyun fadump_conf->fadump_enabled = 0;
313*4882a593Smuzhiyun break;
314*4882a593Smuzhiyun default:
315*4882a593Smuzhiyun pr_err("Failed to register. Unknown Error(%lld).\n", rc);
316*4882a593Smuzhiyun break;
317*4882a593Smuzhiyun }
318*4882a593Smuzhiyun
319*4882a593Smuzhiyun /*
320*4882a593Smuzhiyun * If some regions were registered before OPAL_MPIPL_ADD_RANGE
321*4882a593Smuzhiyun * OPAL call failed, unregister all regions.
322*4882a593Smuzhiyun */
323*4882a593Smuzhiyun if ((err < 0) && (be16_to_cpu(opal_fdm->registered_regions) > 0))
324*4882a593Smuzhiyun opal_fadump_unregister(fadump_conf);
325*4882a593Smuzhiyun
326*4882a593Smuzhiyun return err;
327*4882a593Smuzhiyun }
328*4882a593Smuzhiyun
opal_fadump_unregister(struct fw_dump * fadump_conf)329*4882a593Smuzhiyun static int opal_fadump_unregister(struct fw_dump *fadump_conf)
330*4882a593Smuzhiyun {
331*4882a593Smuzhiyun s64 rc;
332*4882a593Smuzhiyun
333*4882a593Smuzhiyun rc = opal_mpipl_update(OPAL_MPIPL_REMOVE_ALL, 0, 0, 0);
334*4882a593Smuzhiyun if (rc) {
335*4882a593Smuzhiyun pr_err("Failed to un-register - unexpected Error(%lld).\n", rc);
336*4882a593Smuzhiyun return -EIO;
337*4882a593Smuzhiyun }
338*4882a593Smuzhiyun
339*4882a593Smuzhiyun opal_fdm->registered_regions = cpu_to_be16(0);
340*4882a593Smuzhiyun fadump_conf->dump_registered = 0;
341*4882a593Smuzhiyun return 0;
342*4882a593Smuzhiyun }
343*4882a593Smuzhiyun
opal_fadump_invalidate(struct fw_dump * fadump_conf)344*4882a593Smuzhiyun static int opal_fadump_invalidate(struct fw_dump *fadump_conf)
345*4882a593Smuzhiyun {
346*4882a593Smuzhiyun s64 rc;
347*4882a593Smuzhiyun
348*4882a593Smuzhiyun rc = opal_mpipl_update(OPAL_MPIPL_FREE_PRESERVED_MEMORY, 0, 0, 0);
349*4882a593Smuzhiyun if (rc) {
350*4882a593Smuzhiyun pr_err("Failed to invalidate - unexpected Error(%lld).\n", rc);
351*4882a593Smuzhiyun return -EIO;
352*4882a593Smuzhiyun }
353*4882a593Smuzhiyun
354*4882a593Smuzhiyun fadump_conf->dump_active = 0;
355*4882a593Smuzhiyun opal_fdm_active = NULL;
356*4882a593Smuzhiyun return 0;
357*4882a593Smuzhiyun }
358*4882a593Smuzhiyun
opal_fadump_cleanup(struct fw_dump * fadump_conf)359*4882a593Smuzhiyun static void opal_fadump_cleanup(struct fw_dump *fadump_conf)
360*4882a593Smuzhiyun {
361*4882a593Smuzhiyun s64 ret;
362*4882a593Smuzhiyun
363*4882a593Smuzhiyun ret = opal_mpipl_register_tag(OPAL_MPIPL_TAG_KERNEL, 0);
364*4882a593Smuzhiyun if (ret != OPAL_SUCCESS)
365*4882a593Smuzhiyun pr_warn("Could not reset (%llu) kernel metadata tag!\n", ret);
366*4882a593Smuzhiyun }
367*4882a593Smuzhiyun
368*4882a593Smuzhiyun /*
369*4882a593Smuzhiyun * Verify if CPU state data is available. If available, do a bit of sanity
370*4882a593Smuzhiyun * checking before processing this data.
371*4882a593Smuzhiyun */
is_opal_fadump_cpu_data_valid(struct fw_dump * fadump_conf)372*4882a593Smuzhiyun static bool __init is_opal_fadump_cpu_data_valid(struct fw_dump *fadump_conf)
373*4882a593Smuzhiyun {
374*4882a593Smuzhiyun if (!opal_cpu_metadata)
375*4882a593Smuzhiyun return false;
376*4882a593Smuzhiyun
377*4882a593Smuzhiyun fadump_conf->cpu_state_data_version =
378*4882a593Smuzhiyun be32_to_cpu(opal_cpu_metadata->cpu_data_version);
379*4882a593Smuzhiyun fadump_conf->cpu_state_entry_size =
380*4882a593Smuzhiyun be32_to_cpu(opal_cpu_metadata->cpu_data_size);
381*4882a593Smuzhiyun fadump_conf->cpu_state_dest_vaddr =
382*4882a593Smuzhiyun (u64)__va(be64_to_cpu(opal_cpu_metadata->region[0].dest));
383*4882a593Smuzhiyun fadump_conf->cpu_state_data_size =
384*4882a593Smuzhiyun be64_to_cpu(opal_cpu_metadata->region[0].size);
385*4882a593Smuzhiyun
386*4882a593Smuzhiyun if (fadump_conf->cpu_state_data_version != HDAT_FADUMP_CPU_DATA_VER) {
387*4882a593Smuzhiyun pr_warn("Supported CPU state data version: %u, found: %d!\n",
388*4882a593Smuzhiyun HDAT_FADUMP_CPU_DATA_VER,
389*4882a593Smuzhiyun fadump_conf->cpu_state_data_version);
390*4882a593Smuzhiyun pr_warn("WARNING: F/W using newer CPU state data format!!\n");
391*4882a593Smuzhiyun }
392*4882a593Smuzhiyun
393*4882a593Smuzhiyun if ((fadump_conf->cpu_state_dest_vaddr == 0) ||
394*4882a593Smuzhiyun (fadump_conf->cpu_state_entry_size == 0) ||
395*4882a593Smuzhiyun (fadump_conf->cpu_state_entry_size >
396*4882a593Smuzhiyun fadump_conf->cpu_state_data_size)) {
397*4882a593Smuzhiyun pr_err("CPU state data is invalid. Ignoring!\n");
398*4882a593Smuzhiyun return false;
399*4882a593Smuzhiyun }
400*4882a593Smuzhiyun
401*4882a593Smuzhiyun return true;
402*4882a593Smuzhiyun }
403*4882a593Smuzhiyun
404*4882a593Smuzhiyun /*
405*4882a593Smuzhiyun * Convert CPU state data saved at the time of crash into ELF notes.
406*4882a593Smuzhiyun *
407*4882a593Smuzhiyun * While the crashing CPU's register data is saved by the kernel, CPU state
408*4882a593Smuzhiyun * data for all CPUs is saved by f/w. In CPU state data provided by f/w,
409*4882a593Smuzhiyun * each register entry is of 16 bytes, a numerical identifier along with
410*4882a593Smuzhiyun * a GPR/SPR flag in the first 8 bytes and the register value in the next
411*4882a593Smuzhiyun * 8 bytes. For more details refer to F/W documentation. If this data is
412*4882a593Smuzhiyun * missing or in unsupported format, append crashing CPU's register data
413*4882a593Smuzhiyun * saved by the kernel in the PT_NOTE, to have something to work with in
414*4882a593Smuzhiyun * the vmcore file.
415*4882a593Smuzhiyun */
static int __init
opal_fadump_build_cpu_notes(struct fw_dump *fadump_conf,
			    struct fadump_crash_info_header *fdh)
{
	u32 thread_pir, size_per_thread, regs_offset, regs_cnt, reg_esize;
	struct hdat_fadump_thread_hdr *thdr;
	bool is_cpu_data_valid = false;
	u32 num_cpus = 1, *note_buf;	/* at least one note: crashing CPU */
	struct pt_regs regs;
	char *bufp;
	int rc, i;

	/* bufp is only set (and later used) when f/w CPU state data is valid. */
	if (is_opal_fadump_cpu_data_valid(fadump_conf)) {
		size_per_thread = fadump_conf->cpu_state_entry_size;
		num_cpus = (fadump_conf->cpu_state_data_size / size_per_thread);
		bufp = __va(fadump_conf->cpu_state_dest_vaddr);
		is_cpu_data_valid = true;
	}

	rc = fadump_setup_cpu_notes_buf(num_cpus);
	if (rc != 0)
		return rc;

	note_buf = (u32 *)fadump_conf->cpu_notes_buf_vaddr;
	if (!is_cpu_data_valid)
		goto out;

	/*
	 * Offset for register entries, entry size and registers count is
	 * duplicated in every thread header in keeping with HDAT format.
	 * Use these values from the first thread header.
	 */
	thdr = (struct hdat_fadump_thread_hdr *)bufp;
	regs_offset = (offsetof(struct hdat_fadump_thread_hdr, offset) +
		       be32_to_cpu(thdr->offset));
	reg_esize = be32_to_cpu(thdr->esize);
	regs_cnt  = be32_to_cpu(thdr->ecnt);

	pr_debug("--------CPU State Data------------\n");
	pr_debug("NumCpus     : %u\n", num_cpus);
	pr_debug("\tOffset: %u, Entry size: %u, Cnt: %u\n",
		 regs_offset, reg_esize, regs_cnt);

	/* One fixed-size thread entry per CPU in the f/w supplied buffer. */
	for (i = 0; i < num_cpus; i++, bufp += size_per_thread) {
		thdr = (struct hdat_fadump_thread_hdr *)bufp;

		thread_pir = be32_to_cpu(thdr->pir);
		pr_debug("[%04d] PIR: 0x%x, core state: 0x%02x\n",
			 i, thread_pir, thdr->core_state);

		/*
		 * If this is kernel initiated crash, crashing_cpu would be set
		 * appropriately and register data of the crashing CPU saved by
		 * crashing kernel. Add this saved register data of crashing CPU
		 * to elf notes and populate the pt_regs for the remaining CPUs
		 * from register state data provided by firmware.
		 */
		if (fdh->crashing_cpu == thread_pir) {
			note_buf = fadump_regs_to_elf_notes(note_buf,
							    &fdh->regs);
			pr_debug("Crashing CPU PIR: 0x%x - R1 : 0x%lx, NIP : 0x%lx\n",
				 fdh->crashing_cpu, fdh->regs.gpr[1],
				 fdh->regs.nip);
			continue;
		}

		/*
		 * Register state data of MAX cores is provided by firmware,
		 * but some of this cores may not be active. So, while
		 * processing register state data, check core state and
		 * skip threads that belong to inactive cores.
		 */
		if (thdr->core_state == HDAT_FADUMP_CORE_INACTIVE)
			continue;

		opal_fadump_read_regs((bufp + regs_offset), regs_cnt,
				      reg_esize, true, &regs);
		note_buf = fadump_regs_to_elf_notes(note_buf, &regs);
		pr_debug("CPU PIR: 0x%x - R1 : 0x%lx, NIP : 0x%lx\n",
			 thread_pir, regs.gpr[1], regs.nip);
	}

out:
	/*
	 * CPU state data is invalid/unsupported. Try appending crashing CPU's
	 * register data, if it is saved by the kernel.
	 * (note_buf still pointing at the buffer start means nothing was
	 * appended above.)
	 */
	if (fadump_conf->cpu_notes_buf_vaddr == (u64)note_buf) {
		if (fdh->crashing_cpu == FADUMP_CPU_UNKNOWN) {
			/* Nothing at all to put in the notes - give up. */
			fadump_free_cpu_notes_buf();
			return -ENODEV;
		}

		pr_warn("WARNING: appending only crashing CPU's register data\n");
		note_buf = fadump_regs_to_elf_notes(note_buf, &(fdh->regs));
	}

	final_note(note_buf);

	pr_debug("Updating elfcore header (%llx) with cpu notes\n",
		 fdh->elfcorehdr_addr);
	fadump_update_elfcore_header(__va(fdh->elfcorehdr_addr));
	return 0;
}
520*4882a593Smuzhiyun
opal_fadump_process(struct fw_dump * fadump_conf)521*4882a593Smuzhiyun static int __init opal_fadump_process(struct fw_dump *fadump_conf)
522*4882a593Smuzhiyun {
523*4882a593Smuzhiyun struct fadump_crash_info_header *fdh;
524*4882a593Smuzhiyun int rc = -EINVAL;
525*4882a593Smuzhiyun
526*4882a593Smuzhiyun if (!opal_fdm_active || !fadump_conf->fadumphdr_addr)
527*4882a593Smuzhiyun return rc;
528*4882a593Smuzhiyun
529*4882a593Smuzhiyun /* Validate the fadump crash info header */
530*4882a593Smuzhiyun fdh = __va(fadump_conf->fadumphdr_addr);
531*4882a593Smuzhiyun if (fdh->magic_number != FADUMP_CRASH_INFO_MAGIC) {
532*4882a593Smuzhiyun pr_err("Crash info header is not valid.\n");
533*4882a593Smuzhiyun return rc;
534*4882a593Smuzhiyun }
535*4882a593Smuzhiyun
536*4882a593Smuzhiyun #ifdef CONFIG_OPAL_CORE
537*4882a593Smuzhiyun /*
538*4882a593Smuzhiyun * If this is a kernel initiated crash, crashing_cpu would be set
539*4882a593Smuzhiyun * appropriately and register data of the crashing CPU saved by
540*4882a593Smuzhiyun * crashing kernel. Add this saved register data of crashing CPU
541*4882a593Smuzhiyun * to elf notes and populate the pt_regs for the remaining CPUs
542*4882a593Smuzhiyun * from register state data provided by firmware.
543*4882a593Smuzhiyun */
544*4882a593Smuzhiyun if (fdh->crashing_cpu != FADUMP_CPU_UNKNOWN)
545*4882a593Smuzhiyun kernel_initiated = true;
546*4882a593Smuzhiyun #endif
547*4882a593Smuzhiyun
548*4882a593Smuzhiyun rc = opal_fadump_build_cpu_notes(fadump_conf, fdh);
549*4882a593Smuzhiyun if (rc)
550*4882a593Smuzhiyun return rc;
551*4882a593Smuzhiyun
552*4882a593Smuzhiyun /*
553*4882a593Smuzhiyun * We are done validating dump info and elfcore header is now ready
554*4882a593Smuzhiyun * to be exported. set elfcorehdr_addr so that vmcore module will
555*4882a593Smuzhiyun * export the elfcore header through '/proc/vmcore'.
556*4882a593Smuzhiyun */
557*4882a593Smuzhiyun elfcorehdr_addr = fdh->elfcorehdr_addr;
558*4882a593Smuzhiyun
559*4882a593Smuzhiyun return rc;
560*4882a593Smuzhiyun }
561*4882a593Smuzhiyun
opal_fadump_region_show(struct fw_dump * fadump_conf,struct seq_file * m)562*4882a593Smuzhiyun static void opal_fadump_region_show(struct fw_dump *fadump_conf,
563*4882a593Smuzhiyun struct seq_file *m)
564*4882a593Smuzhiyun {
565*4882a593Smuzhiyun const struct opal_fadump_mem_struct *fdm_ptr;
566*4882a593Smuzhiyun u64 dumped_bytes = 0;
567*4882a593Smuzhiyun int i;
568*4882a593Smuzhiyun
569*4882a593Smuzhiyun if (fadump_conf->dump_active)
570*4882a593Smuzhiyun fdm_ptr = opal_fdm_active;
571*4882a593Smuzhiyun else
572*4882a593Smuzhiyun fdm_ptr = opal_fdm;
573*4882a593Smuzhiyun
574*4882a593Smuzhiyun for (i = 0; i < be16_to_cpu(fdm_ptr->region_cnt); i++) {
575*4882a593Smuzhiyun /*
576*4882a593Smuzhiyun * Only regions that are registered for MPIPL
577*4882a593Smuzhiyun * would have dump data.
578*4882a593Smuzhiyun */
579*4882a593Smuzhiyun if ((fadump_conf->dump_active) &&
580*4882a593Smuzhiyun (i < be16_to_cpu(fdm_ptr->registered_regions)))
581*4882a593Smuzhiyun dumped_bytes = be64_to_cpu(fdm_ptr->rgn[i].size);
582*4882a593Smuzhiyun
583*4882a593Smuzhiyun seq_printf(m, "DUMP: Src: %#016llx, Dest: %#016llx, ",
584*4882a593Smuzhiyun be64_to_cpu(fdm_ptr->rgn[i].src),
585*4882a593Smuzhiyun be64_to_cpu(fdm_ptr->rgn[i].dest));
586*4882a593Smuzhiyun seq_printf(m, "Size: %#llx, Dumped: %#llx bytes\n",
587*4882a593Smuzhiyun be64_to_cpu(fdm_ptr->rgn[i].size), dumped_bytes);
588*4882a593Smuzhiyun }
589*4882a593Smuzhiyun
590*4882a593Smuzhiyun /* Dump is active. Show reserved area start address. */
591*4882a593Smuzhiyun if (fadump_conf->dump_active) {
592*4882a593Smuzhiyun seq_printf(m, "\nMemory above %#016lx is reserved for saving crash dump\n",
593*4882a593Smuzhiyun fadump_conf->reserve_dump_area_start);
594*4882a593Smuzhiyun }
595*4882a593Smuzhiyun }
596*4882a593Smuzhiyun
opal_fadump_trigger(struct fadump_crash_info_header * fdh,const char * msg)597*4882a593Smuzhiyun static void opal_fadump_trigger(struct fadump_crash_info_header *fdh,
598*4882a593Smuzhiyun const char *msg)
599*4882a593Smuzhiyun {
600*4882a593Smuzhiyun int rc;
601*4882a593Smuzhiyun
602*4882a593Smuzhiyun /*
603*4882a593Smuzhiyun * Unlike on pSeries platform, logical CPU number is not provided
604*4882a593Smuzhiyun * with architected register state data. So, store the crashing
605*4882a593Smuzhiyun * CPU's PIR instead to plug the appropriate register data for
606*4882a593Smuzhiyun * crashing CPU in the vmcore file.
607*4882a593Smuzhiyun */
608*4882a593Smuzhiyun fdh->crashing_cpu = (u32)mfspr(SPRN_PIR);
609*4882a593Smuzhiyun
610*4882a593Smuzhiyun rc = opal_cec_reboot2(OPAL_REBOOT_MPIPL, msg);
611*4882a593Smuzhiyun if (rc == OPAL_UNSUPPORTED) {
612*4882a593Smuzhiyun pr_emerg("Reboot type %d not supported.\n",
613*4882a593Smuzhiyun OPAL_REBOOT_MPIPL);
614*4882a593Smuzhiyun } else if (rc == OPAL_HARDWARE)
615*4882a593Smuzhiyun pr_emerg("No backend support for MPIPL!\n");
616*4882a593Smuzhiyun }
617*4882a593Smuzhiyun
/* OPAL (PowerNV) platform callbacks hooked into the common FADump code. */
static struct fadump_ops opal_fadump_ops = {
	.fadump_init_mem_struct		= opal_fadump_init_mem_struct,
	.fadump_get_metadata_size	= opal_fadump_get_metadata_size,
	.fadump_setup_metadata		= opal_fadump_setup_metadata,
	.fadump_get_bootmem_min		= opal_fadump_get_bootmem_min,
	.fadump_register		= opal_fadump_register,
	.fadump_unregister		= opal_fadump_unregister,
	.fadump_invalidate		= opal_fadump_invalidate,
	.fadump_cleanup			= opal_fadump_cleanup,
	.fadump_process			= opal_fadump_process,
	.fadump_region_show		= opal_fadump_region_show,
	.fadump_trigger			= opal_fadump_trigger,
};
631*4882a593Smuzhiyun
opal_fadump_dt_scan(struct fw_dump * fadump_conf,u64 node)632*4882a593Smuzhiyun void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
633*4882a593Smuzhiyun {
634*4882a593Smuzhiyun const __be32 *prop;
635*4882a593Smuzhiyun unsigned long dn;
636*4882a593Smuzhiyun __be64 be_addr;
637*4882a593Smuzhiyun u64 addr = 0;
638*4882a593Smuzhiyun int i, len;
639*4882a593Smuzhiyun s64 ret;
640*4882a593Smuzhiyun
641*4882a593Smuzhiyun /*
642*4882a593Smuzhiyun * Check if Firmware-Assisted Dump is supported. if yes, check
643*4882a593Smuzhiyun * if dump has been initiated on last reboot.
644*4882a593Smuzhiyun */
645*4882a593Smuzhiyun dn = of_get_flat_dt_subnode_by_name(node, "dump");
646*4882a593Smuzhiyun if (dn == -FDT_ERR_NOTFOUND) {
647*4882a593Smuzhiyun pr_debug("FADump support is missing!\n");
648*4882a593Smuzhiyun return;
649*4882a593Smuzhiyun }
650*4882a593Smuzhiyun
651*4882a593Smuzhiyun if (!of_flat_dt_is_compatible(dn, "ibm,opal-dump")) {
652*4882a593Smuzhiyun pr_err("Support missing for this f/w version!\n");
653*4882a593Smuzhiyun return;
654*4882a593Smuzhiyun }
655*4882a593Smuzhiyun
656*4882a593Smuzhiyun prop = of_get_flat_dt_prop(dn, "fw-load-area", &len);
657*4882a593Smuzhiyun if (prop) {
658*4882a593Smuzhiyun /*
659*4882a593Smuzhiyun * Each f/w load area is an (address,size) pair,
660*4882a593Smuzhiyun * 2 cells each, totalling 4 cells per range.
661*4882a593Smuzhiyun */
662*4882a593Smuzhiyun for (i = 0; i < len / (sizeof(*prop) * 4); i++) {
663*4882a593Smuzhiyun u64 base, end;
664*4882a593Smuzhiyun
665*4882a593Smuzhiyun base = of_read_number(prop + (i * 4) + 0, 2);
666*4882a593Smuzhiyun end = base;
667*4882a593Smuzhiyun end += of_read_number(prop + (i * 4) + 2, 2);
668*4882a593Smuzhiyun if (end > OPAL_FADUMP_MIN_BOOT_MEM) {
669*4882a593Smuzhiyun pr_err("F/W load area: 0x%llx-0x%llx\n",
670*4882a593Smuzhiyun base, end);
671*4882a593Smuzhiyun pr_err("F/W version not supported!\n");
672*4882a593Smuzhiyun return;
673*4882a593Smuzhiyun }
674*4882a593Smuzhiyun }
675*4882a593Smuzhiyun }
676*4882a593Smuzhiyun
677*4882a593Smuzhiyun fadump_conf->ops = &opal_fadump_ops;
678*4882a593Smuzhiyun fadump_conf->fadump_supported = 1;
679*4882a593Smuzhiyun
680*4882a593Smuzhiyun /*
681*4882a593Smuzhiyun * Firmware supports 32-bit field for size. Align it to PAGE_SIZE
682*4882a593Smuzhiyun * and request firmware to copy multiple kernel boot memory regions.
683*4882a593Smuzhiyun */
684*4882a593Smuzhiyun fadump_conf->max_copy_size = ALIGN_DOWN(U32_MAX, PAGE_SIZE);
685*4882a593Smuzhiyun
686*4882a593Smuzhiyun /*
687*4882a593Smuzhiyun * Check if dump has been initiated on last reboot.
688*4882a593Smuzhiyun */
689*4882a593Smuzhiyun prop = of_get_flat_dt_prop(dn, "mpipl-boot", NULL);
690*4882a593Smuzhiyun if (!prop)
691*4882a593Smuzhiyun return;
692*4882a593Smuzhiyun
693*4882a593Smuzhiyun ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_KERNEL, &be_addr);
694*4882a593Smuzhiyun if ((ret != OPAL_SUCCESS) || !be_addr) {
695*4882a593Smuzhiyun pr_err("Failed to get Kernel metadata (%lld)\n", ret);
696*4882a593Smuzhiyun return;
697*4882a593Smuzhiyun }
698*4882a593Smuzhiyun
699*4882a593Smuzhiyun addr = be64_to_cpu(be_addr);
700*4882a593Smuzhiyun pr_debug("Kernel metadata addr: %llx\n", addr);
701*4882a593Smuzhiyun
702*4882a593Smuzhiyun opal_fdm_active = __va(addr);
703*4882a593Smuzhiyun if (opal_fdm_active->version != OPAL_FADUMP_VERSION) {
704*4882a593Smuzhiyun pr_warn("Supported kernel metadata version: %u, found: %d!\n",
705*4882a593Smuzhiyun OPAL_FADUMP_VERSION, opal_fdm_active->version);
706*4882a593Smuzhiyun pr_warn("WARNING: Kernel metadata format mismatch identified! Core file maybe corrupted..\n");
707*4882a593Smuzhiyun }
708*4882a593Smuzhiyun
709*4882a593Smuzhiyun /* Kernel regions not registered with f/w for MPIPL */
710*4882a593Smuzhiyun if (be16_to_cpu(opal_fdm_active->registered_regions) == 0) {
711*4882a593Smuzhiyun opal_fdm_active = NULL;
712*4882a593Smuzhiyun return;
713*4882a593Smuzhiyun }
714*4882a593Smuzhiyun
715*4882a593Smuzhiyun ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_CPU, &be_addr);
716*4882a593Smuzhiyun if (be_addr) {
717*4882a593Smuzhiyun addr = be64_to_cpu(be_addr);
718*4882a593Smuzhiyun pr_debug("CPU metadata addr: %llx\n", addr);
719*4882a593Smuzhiyun opal_cpu_metadata = __va(addr);
720*4882a593Smuzhiyun }
721*4882a593Smuzhiyun
722*4882a593Smuzhiyun pr_info("Firmware-assisted dump is active.\n");
723*4882a593Smuzhiyun fadump_conf->dump_active = 1;
724*4882a593Smuzhiyun opal_fadump_get_config(fadump_conf, opal_fdm_active);
725*4882a593Smuzhiyun }
726*4882a593Smuzhiyun #endif /* !CONFIG_PRESERVE_FA_DUMP */
727