xref: /OK3568_Linux_fs/kernel/arch/x86/include/asm/kexec.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef _ASM_X86_KEXEC_H
3*4882a593Smuzhiyun #define _ASM_X86_KEXEC_H
4*4882a593Smuzhiyun 
#ifdef CONFIG_X86_32
/*
 * Slot indices into the page list passed to the relocation code.
 * On 32-bit, slot 2 is the temporary page directory used while the
 * kernel image is being copied into place.
 */
# define PA_CONTROL_PAGE	0	/* physical address of the control page */
# define VA_CONTROL_PAGE	1	/* virtual address of the control page */
# define PA_PGD			2	/* physical address of the transition PGD */
# define PA_SWAP_PAGE		3	/* scratch page for swapping page contents */
# define PAGES_NR		4	/* number of slots above */
#else
/* 64-bit variant: slot 2 carries the top-level page-table page instead. */
# define PA_CONTROL_PAGE	0
# define VA_CONTROL_PAGE	1
# define PA_TABLE_PAGE		2
# define PA_SWAP_PAGE		3
# define PAGES_NR		4
#endif

/* Upper bound on the size of the relocation (control page) code blob. */
# define KEXEC_CONTROL_CODE_MAX_SIZE	2048
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun #ifndef __ASSEMBLY__
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun #include <linux/string.h>
24*4882a593Smuzhiyun #include <linux/kernel.h>
25*4882a593Smuzhiyun 
26*4882a593Smuzhiyun #include <asm/page.h>
27*4882a593Smuzhiyun #include <asm/ptrace.h>
28*4882a593Smuzhiyun #include <asm/bootparam.h>
29*4882a593Smuzhiyun 
struct kimage;	/* opaque here; defined in <linux/kexec.h> */

/*
 * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
 * I.e. Maximum page that is mapped directly into kernel memory,
 * and kmap is not required.
 *
 * So far x86_64 is limited to 40 physical address bits.
 */
#ifdef CONFIG_X86_32
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
# define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE

/* One 4K page is enough for the 32-bit control code. */
# define KEXEC_CONTROL_PAGE_SIZE	4096

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_386

/* We can also handle crash dumps from 64 bit kernel. */
# define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
#else
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT      (MAXMEM-1)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (MAXMEM-1)
/* Maximum address we can use for the control pages */
# define KEXEC_CONTROL_MEMORY_LIMIT     (MAXMEM-1)

/* Allocate one page for the pdp and the second for the code */
# define KEXEC_CONTROL_PAGE_SIZE  (4096UL + 4096UL)

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_X86_64
#endif
68*4882a593Smuzhiyun 
69*4882a593Smuzhiyun /*
70*4882a593Smuzhiyun  * This function is responsible for capturing register states if coming
71*4882a593Smuzhiyun  * via panic otherwise just fix up the ss and sp if coming via kernel
72*4882a593Smuzhiyun  * mode exception.
73*4882a593Smuzhiyun  */
/*
 * This function is responsible for capturing register states if coming
 * via panic otherwise just fix up the ss and sp if coming via kernel
 * mode exception.
 *
 * @newregs: destination pt_regs snapshot to fill for the crash dump
 * @oldregs: register state captured at the exception/panic site, or
 *           NULL when no such snapshot exists
 */
static inline void crash_setup_regs(struct pt_regs *newregs,
				    struct pt_regs *oldregs)
{
	if (oldregs) {
		/* A snapshot already exists (e.g. from an exception frame). */
		memcpy(newregs, oldregs, sizeof(*newregs));
	} else {
		/*
		 * No snapshot available: read each register live, one
		 * volatile asm at a time.  This is a best-effort capture
		 * of the state at this point in the function, which is
		 * adequate for crash-dump purposes.
		 */
#ifdef CONFIG_X86_32
		asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
		asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
		asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
		asm volatile("movl %%esi,%0" : "=m"(newregs->si));
		asm volatile("movl %%edi,%0" : "=m"(newregs->di));
		asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
		asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
		asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
		/*
		 * Segment selectors are read through %eax ("=a" output);
		 * ->ax was already saved above, so clobbering %eax here
		 * is safe.
		 */
		asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
		asm volatile("movl %%ds, %%eax;" :"=a"(newregs->ds));
		asm volatile("movl %%es, %%eax;" :"=a"(newregs->es));
		/* EFLAGS can only be read via push/pop. */
		asm volatile("pushfl; popl %0" :"=m"(newregs->flags));
#else
		asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
		asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
		asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
		asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
		asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
		asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
		asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
		asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
		asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
		asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
		asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
		asm volatile("movq %%r11,%0" : "=m"(newregs->r11));
		asm volatile("movq %%r12,%0" : "=m"(newregs->r12));
		asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
		asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
		asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
		/* Segment reads clobber %eax; ->ax was saved above. */
		asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
		/* RFLAGS can only be read via push/pop. */
		asm volatile("pushfq; popq %0" :"=m"(newregs->flags));
#endif
		/* No way to read %ip directly: use this code's address. */
		newregs->ip = _THIS_IP_;
	}
}
118*4882a593Smuzhiyun 
#ifdef CONFIG_X86_32
/*
 * Assembly entry point that copies the new kernel into place and jumps
 * to it (implemented in relocate_kernel_32.S).  Signature must match
 * the asm implementation exactly.
 */
asmlinkage unsigned long
relocate_kernel(unsigned long indirection_page,
		unsigned long control_page,
		unsigned long start_address,
		unsigned int has_pae,
		unsigned int preserve_context);
#else
/*
 * 64-bit variant (relocate_kernel_64.S).  Takes the PA_*-indexed
 * page_list instead of a single control page, plus an SME-active flag.
 */
unsigned long
relocate_kernel(unsigned long indirection_page,
		unsigned long page_list,
		unsigned long start_address,
		unsigned int preserve_context,
		unsigned int sme_active);
#endif
134*4882a593Smuzhiyun 
/* Tell the kexec core that x86 embeds its own struct kimage_arch. */
#define ARCH_HAS_KIMAGE_ARCH

#ifdef CONFIG_X86_32
/*
 * Per-image arch state: the page-table pages allocated for the
 * transition mapping used while relocating the new kernel.
 */
struct kimage_arch {
	pgd_t *pgd;
#ifdef CONFIG_X86_PAE
	/* With PAE two PMD pages are needed. */
	pmd_t *pmd0;
	pmd_t *pmd1;
#endif
	pte_t *pte0;
	pte_t *pte1;
};
#else
/* 64-bit: one page per level of the transition page-table hierarchy. */
struct kimage_arch {
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* Core ELF header buffer */
	void *elf_headers;
	unsigned long elf_headers_sz;
	unsigned long elf_load_addr;
};
#endif /* CONFIG_X86_32 */
160*4882a593Smuzhiyun 
#ifdef CONFIG_X86_64
/*
 * Number of elements and order of elements in this structure should match
 * with the ones in arch/x86/purgatory/entry64.S. If you make a change here
 * make an appropriate change in purgatory too.
 *
 * Register state loaded by purgatory before jumping to the new kernel;
 * the field order is ABI with that asm — do not reorder.
 */
struct kexec_entry64_regs {
	uint64_t rax;
	uint64_t rcx;
	uint64_t rdx;
	uint64_t rbx;
	uint64_t rsp;
	uint64_t rbp;
	uint64_t rsi;
	uint64_t rdi;
	uint64_t r8;
	uint64_t r9;
	uint64_t r10;
	uint64_t r11;
	uint64_t r12;
	uint64_t r13;
	uint64_t r14;
	uint64_t r15;
	uint64_t rip;
};

/*
 * Arch hooks run after allocating / before freeing kexec control pages
 * (e.g. for fixing up the mapping of those pages — see their
 * definitions in arch/x86 for the exact behavior).
 */
extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
				       gfp_t gfp);
#define arch_kexec_post_alloc_pages arch_kexec_post_alloc_pages

extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
#define arch_kexec_pre_free_pages arch_kexec_pre_free_pages

#ifdef CONFIG_KEXEC_FILE
struct purgatory_info;
/* Applies RELA relocations when loading the purgatory ELF (kexec_file). */
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
				     Elf_Shdr *section,
				     const Elf_Shdr *relsec,
				     const Elf_Shdr *symtab);
#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
#endif
#endif
203*4882a593Smuzhiyun 
/*
 * Callback invoked on crash to clear loaded VMCSs — presumably
 * installed by KVM/VMX; confirm against the setter of
 * crash_vmclear_loaded_vmcss before relying on this.
 */
typedef void crash_vmclear_fn(void);
extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
/* Stops all other CPUs via NMI before taking a kdump. */
extern void kdump_nmi_shootdown_cpus(void);
207*4882a593Smuzhiyun 
208*4882a593Smuzhiyun #endif /* __ASSEMBLY__ */
209*4882a593Smuzhiyun 
210*4882a593Smuzhiyun #endif /* _ASM_X86_KEXEC_H */
211