// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2005, 2011
 *
 * Author(s): Rolf Adelsberger,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>
 *	      Michael Holzheu <holzheu@linux.vnet.ibm.com>
 */
9*4882a593Smuzhiyun
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <linux/debug_locks.h>
#include <asm/cio.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/ipl.h>
#include <asm/diag.h>
#include <asm/elf.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/os_info.h>
#include <asm/set_memory.h>
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
30*4882a593Smuzhiyun
31*4882a593Smuzhiyun typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
32*4882a593Smuzhiyun
33*4882a593Smuzhiyun extern const unsigned char relocate_kernel[];
34*4882a593Smuzhiyun extern const unsigned long long relocate_kernel_len;
35*4882a593Smuzhiyun
36*4882a593Smuzhiyun #ifdef CONFIG_CRASH_DUMP
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun /*
39*4882a593Smuzhiyun * Reset the system, copy boot CPU registers to absolute zero,
40*4882a593Smuzhiyun * and jump to the kdump image
41*4882a593Smuzhiyun */
__do_machine_kdump(void * image)42*4882a593Smuzhiyun static void __do_machine_kdump(void *image)
43*4882a593Smuzhiyun {
44*4882a593Smuzhiyun int (*start_kdump)(int);
45*4882a593Smuzhiyun unsigned long prefix;
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun /* store_status() saved the prefix register to lowcore */
48*4882a593Smuzhiyun prefix = (unsigned long) S390_lowcore.prefixreg_save_area;
49*4882a593Smuzhiyun
50*4882a593Smuzhiyun /* Now do the reset */
51*4882a593Smuzhiyun s390_reset_system();
52*4882a593Smuzhiyun
53*4882a593Smuzhiyun /*
54*4882a593Smuzhiyun * Copy dump CPU store status info to absolute zero.
55*4882a593Smuzhiyun * This need to be done *after* s390_reset_system set the
56*4882a593Smuzhiyun * prefix register of this CPU to zero
57*4882a593Smuzhiyun */
58*4882a593Smuzhiyun memcpy((void *) __LC_FPREGS_SAVE_AREA,
59*4882a593Smuzhiyun (void *)(prefix + __LC_FPREGS_SAVE_AREA), 512);
60*4882a593Smuzhiyun
61*4882a593Smuzhiyun __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
62*4882a593Smuzhiyun start_kdump = (void *)((struct kimage *) image)->start;
63*4882a593Smuzhiyun start_kdump(1);
64*4882a593Smuzhiyun
65*4882a593Smuzhiyun /* Die if start_kdump returns */
66*4882a593Smuzhiyun disabled_wait();
67*4882a593Smuzhiyun }
68*4882a593Smuzhiyun
69*4882a593Smuzhiyun /*
70*4882a593Smuzhiyun * Start kdump: create a LGR log entry, store status of all CPUs and
71*4882a593Smuzhiyun * branch to __do_machine_kdump.
72*4882a593Smuzhiyun */
__machine_kdump(void * image)73*4882a593Smuzhiyun static noinline void __machine_kdump(void *image)
74*4882a593Smuzhiyun {
75*4882a593Smuzhiyun struct mcesa *mcesa;
76*4882a593Smuzhiyun union ctlreg2 cr2_old, cr2_new;
77*4882a593Smuzhiyun int this_cpu, cpu;
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun lgr_info_log();
80*4882a593Smuzhiyun /* Get status of the other CPUs */
81*4882a593Smuzhiyun this_cpu = smp_find_processor_id(stap());
82*4882a593Smuzhiyun for_each_online_cpu(cpu) {
83*4882a593Smuzhiyun if (cpu == this_cpu)
84*4882a593Smuzhiyun continue;
85*4882a593Smuzhiyun if (smp_store_status(cpu))
86*4882a593Smuzhiyun continue;
87*4882a593Smuzhiyun }
88*4882a593Smuzhiyun /* Store status of the boot CPU */
89*4882a593Smuzhiyun mcesa = (struct mcesa *)(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
90*4882a593Smuzhiyun if (MACHINE_HAS_VX)
91*4882a593Smuzhiyun save_vx_regs((__vector128 *) mcesa->vector_save_area);
92*4882a593Smuzhiyun if (MACHINE_HAS_GS) {
93*4882a593Smuzhiyun __ctl_store(cr2_old.val, 2, 2);
94*4882a593Smuzhiyun cr2_new = cr2_old;
95*4882a593Smuzhiyun cr2_new.gse = 1;
96*4882a593Smuzhiyun __ctl_load(cr2_new.val, 2, 2);
97*4882a593Smuzhiyun save_gs_cb((struct gs_cb *) mcesa->guarded_storage_save_area);
98*4882a593Smuzhiyun __ctl_load(cr2_old.val, 2, 2);
99*4882a593Smuzhiyun }
100*4882a593Smuzhiyun /*
101*4882a593Smuzhiyun * To create a good backchain for this CPU in the dump store_status
102*4882a593Smuzhiyun * is passed the address of a function. The address is saved into
103*4882a593Smuzhiyun * the PSW save area of the boot CPU and the function is invoked as
104*4882a593Smuzhiyun * a tail call of store_status. The backchain in the dump will look
105*4882a593Smuzhiyun * like this:
106*4882a593Smuzhiyun * restart_int_handler -> __machine_kexec -> __do_machine_kdump
107*4882a593Smuzhiyun * The call to store_status() will not return.
108*4882a593Smuzhiyun */
109*4882a593Smuzhiyun store_status(__do_machine_kdump, image);
110*4882a593Smuzhiyun }
111*4882a593Smuzhiyun
do_start_kdump(unsigned long addr)112*4882a593Smuzhiyun static unsigned long do_start_kdump(unsigned long addr)
113*4882a593Smuzhiyun {
114*4882a593Smuzhiyun struct kimage *image = (struct kimage *) addr;
115*4882a593Smuzhiyun int (*start_kdump)(int) = (void *)image->start;
116*4882a593Smuzhiyun int rc;
117*4882a593Smuzhiyun
118*4882a593Smuzhiyun __arch_local_irq_stnsm(0xfb); /* disable DAT */
119*4882a593Smuzhiyun rc = start_kdump(0);
120*4882a593Smuzhiyun __arch_local_irq_stosm(0x04); /* enable DAT */
121*4882a593Smuzhiyun return rc;
122*4882a593Smuzhiyun }
123*4882a593Smuzhiyun
124*4882a593Smuzhiyun #endif /* CONFIG_CRASH_DUMP */
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun /*
127*4882a593Smuzhiyun * Check if kdump checksums are valid: We call purgatory with parameter "0"
128*4882a593Smuzhiyun */
kdump_csum_valid(struct kimage * image)129*4882a593Smuzhiyun static bool kdump_csum_valid(struct kimage *image)
130*4882a593Smuzhiyun {
131*4882a593Smuzhiyun #ifdef CONFIG_CRASH_DUMP
132*4882a593Smuzhiyun int rc;
133*4882a593Smuzhiyun
134*4882a593Smuzhiyun preempt_disable();
135*4882a593Smuzhiyun rc = CALL_ON_STACK(do_start_kdump, S390_lowcore.nodat_stack, 1, image);
136*4882a593Smuzhiyun preempt_enable();
137*4882a593Smuzhiyun return rc == 0;
138*4882a593Smuzhiyun #else
139*4882a593Smuzhiyun return false;
140*4882a593Smuzhiyun #endif
141*4882a593Smuzhiyun }
142*4882a593Smuzhiyun
143*4882a593Smuzhiyun #ifdef CONFIG_CRASH_DUMP
144*4882a593Smuzhiyun
crash_free_reserved_phys_range(unsigned long begin,unsigned long end)145*4882a593Smuzhiyun void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
146*4882a593Smuzhiyun {
147*4882a593Smuzhiyun unsigned long addr, size;
148*4882a593Smuzhiyun
149*4882a593Smuzhiyun for (addr = begin; addr < end; addr += PAGE_SIZE)
150*4882a593Smuzhiyun free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
151*4882a593Smuzhiyun size = begin - crashk_res.start;
152*4882a593Smuzhiyun if (size)
153*4882a593Smuzhiyun os_info_crashkernel_add(crashk_res.start, size);
154*4882a593Smuzhiyun else
155*4882a593Smuzhiyun os_info_crashkernel_add(0, 0);
156*4882a593Smuzhiyun }
157*4882a593Smuzhiyun
crash_protect_pages(int protect)158*4882a593Smuzhiyun static void crash_protect_pages(int protect)
159*4882a593Smuzhiyun {
160*4882a593Smuzhiyun unsigned long size;
161*4882a593Smuzhiyun
162*4882a593Smuzhiyun if (!crashk_res.end)
163*4882a593Smuzhiyun return;
164*4882a593Smuzhiyun size = resource_size(&crashk_res);
165*4882a593Smuzhiyun if (protect)
166*4882a593Smuzhiyun set_memory_ro(crashk_res.start, size >> PAGE_SHIFT);
167*4882a593Smuzhiyun else
168*4882a593Smuzhiyun set_memory_rw(crashk_res.start, size >> PAGE_SHIFT);
169*4882a593Smuzhiyun }
170*4882a593Smuzhiyun
/* Write-protect the crashkernel region after a kdump image is loaded. */
void arch_kexec_protect_crashkres(void)
{
	crash_protect_pages(1);
}
175*4882a593Smuzhiyun
/* Make the crashkernel region writable again (e.g. before reloading). */
void arch_kexec_unprotect_crashkres(void)
{
	crash_protect_pages(0);
}
180*4882a593Smuzhiyun
181*4882a593Smuzhiyun #endif
182*4882a593Smuzhiyun
183*4882a593Smuzhiyun /*
184*4882a593Smuzhiyun * Give back memory to hypervisor before new kdump is loaded
185*4882a593Smuzhiyun */
machine_kexec_prepare_kdump(void)186*4882a593Smuzhiyun static int machine_kexec_prepare_kdump(void)
187*4882a593Smuzhiyun {
188*4882a593Smuzhiyun #ifdef CONFIG_CRASH_DUMP
189*4882a593Smuzhiyun if (MACHINE_IS_VM)
190*4882a593Smuzhiyun diag10_range(PFN_DOWN(crashk_res.start),
191*4882a593Smuzhiyun PFN_DOWN(crashk_res.end - crashk_res.start + 1));
192*4882a593Smuzhiyun return 0;
193*4882a593Smuzhiyun #else
194*4882a593Smuzhiyun return -EINVAL;
195*4882a593Smuzhiyun #endif
196*4882a593Smuzhiyun }
197*4882a593Smuzhiyun
machine_kexec_prepare(struct kimage * image)198*4882a593Smuzhiyun int machine_kexec_prepare(struct kimage *image)
199*4882a593Smuzhiyun {
200*4882a593Smuzhiyun void *reboot_code_buffer;
201*4882a593Smuzhiyun
202*4882a593Smuzhiyun if (image->type == KEXEC_TYPE_CRASH)
203*4882a593Smuzhiyun return machine_kexec_prepare_kdump();
204*4882a593Smuzhiyun
205*4882a593Smuzhiyun /* We don't support anything but the default image type for now. */
206*4882a593Smuzhiyun if (image->type != KEXEC_TYPE_DEFAULT)
207*4882a593Smuzhiyun return -EINVAL;
208*4882a593Smuzhiyun
209*4882a593Smuzhiyun /* Get the destination where the assembler code should be copied to.*/
210*4882a593Smuzhiyun reboot_code_buffer = (void *) page_to_phys(image->control_code_page);
211*4882a593Smuzhiyun
212*4882a593Smuzhiyun /* Then copy it */
213*4882a593Smuzhiyun memcpy(reboot_code_buffer, relocate_kernel, relocate_kernel_len);
214*4882a593Smuzhiyun return 0;
215*4882a593Smuzhiyun }
216*4882a593Smuzhiyun
/* Nothing to clean up on s390 when a kexec image is unloaded. */
void machine_kexec_cleanup(struct kimage *image)
{
}
220*4882a593Smuzhiyun
arch_crash_save_vmcoreinfo(void)221*4882a593Smuzhiyun void arch_crash_save_vmcoreinfo(void)
222*4882a593Smuzhiyun {
223*4882a593Smuzhiyun VMCOREINFO_SYMBOL(lowcore_ptr);
224*4882a593Smuzhiyun VMCOREINFO_SYMBOL(high_memory);
225*4882a593Smuzhiyun VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
226*4882a593Smuzhiyun vmcoreinfo_append_str("SDMA=%lx\n", __sdma);
227*4882a593Smuzhiyun vmcoreinfo_append_str("EDMA=%lx\n", __edma);
228*4882a593Smuzhiyun vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
229*4882a593Smuzhiyun mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
230*4882a593Smuzhiyun }
231*4882a593Smuzhiyun
/* No device quiescing is required before kexec on s390. */
void machine_shutdown(void)
{
}
235*4882a593Smuzhiyun
/* Save the re-IPL block in os_info before jumping into the kdump kernel. */
void machine_crash_shutdown(struct pt_regs *regs)
{
	set_os_info_reipl_block();
}
240*4882a593Smuzhiyun
241*4882a593Smuzhiyun /*
242*4882a593Smuzhiyun * Do normal kexec
243*4882a593Smuzhiyun */
__do_machine_kexec(void * data)244*4882a593Smuzhiyun static void __do_machine_kexec(void *data)
245*4882a593Smuzhiyun {
246*4882a593Smuzhiyun relocate_kernel_t data_mover;
247*4882a593Smuzhiyun struct kimage *image = data;
248*4882a593Smuzhiyun
249*4882a593Smuzhiyun s390_reset_system();
250*4882a593Smuzhiyun data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);
251*4882a593Smuzhiyun
252*4882a593Smuzhiyun __arch_local_irq_stnsm(0xfb); /* disable DAT - avoid no-execute */
253*4882a593Smuzhiyun /* Call the moving routine */
254*4882a593Smuzhiyun (*data_mover)(&image->head, image->start);
255*4882a593Smuzhiyun
256*4882a593Smuzhiyun /* Die if kexec returns */
257*4882a593Smuzhiyun disabled_wait();
258*4882a593Smuzhiyun }
259*4882a593Smuzhiyun
/*
 * Reset system and call either kdump or normal kexec
 */
static void __machine_kexec(void *data)
{
	__arch_local_irq_stosm(0x04);	/* enable DAT */
	pfault_fini();
	tracing_off();
	debug_locks_off();
#ifdef CONFIG_CRASH_DUMP
	/* The kdump path does not return */
	if (((struct kimage *) data)->type == KEXEC_TYPE_CRASH)
		__machine_kdump(data);
#endif
	__do_machine_kexec(data);
}
275*4882a593Smuzhiyun
276*4882a593Smuzhiyun /*
277*4882a593Smuzhiyun * Do either kdump or normal kexec. In case of kdump we first ask
278*4882a593Smuzhiyun * purgatory, if kdump checksums are valid.
279*4882a593Smuzhiyun */
machine_kexec(struct kimage * image)280*4882a593Smuzhiyun void machine_kexec(struct kimage *image)
281*4882a593Smuzhiyun {
282*4882a593Smuzhiyun if (image->type == KEXEC_TYPE_CRASH && !kdump_csum_valid(image))
283*4882a593Smuzhiyun return;
284*4882a593Smuzhiyun tracer_disable();
285*4882a593Smuzhiyun smp_send_stop();
286*4882a593Smuzhiyun smp_call_ipl_cpu(__machine_kexec, image);
287*4882a593Smuzhiyun }
288