// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include "compressed/decompressor.h"
#include "boot.h"

extern char __boot_data_start[], __boot_data_end[];
extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
unsigned long __bootdata_preserved(__kaslr_offset);

/*
 * Some code and data needs to stay below 2 GB, even when the kernel would be
 * relocated above 2 GB, because it has to use 31 bit addresses.
 * Such code and data is part of the .dma section, and its location is passed
 * over to the decompressed / relocated kernel via the .boot.preserved.data
 * section.
 */
extern char _sdma[], _edma[];
extern char _stext_dma[], _etext_dma[];
extern struct exception_table_entry _start_dma_ex_table[];
extern struct exception_table_entry _stop_dma_ex_table[];
unsigned long __bootdata_preserved(__sdma) = __pa(&_sdma);
unsigned long __bootdata_preserved(__edma) = __pa(&_edma);
unsigned long __bootdata_preserved(__stext_dma) = __pa(&_stext_dma);
unsigned long __bootdata_preserved(__etext_dma) = __pa(&_etext_dma);
struct exception_table_entry *
	__bootdata_preserved(__start_dma_ex_table) = _start_dma_ex_table;
struct exception_table_entry *
	__bootdata_preserved(__stop_dma_ex_table) = _stop_dma_ex_table;

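/*
 * 31-bit entry points for diagnose calls that must run below 2 GB. They live
 * in the .dma text section, and the decompressed kernel reaches them through
 * the preserved diag_dma_ops function pointer table.
 */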
int _diag210_dma(struct diag210 *addr);
int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode);
int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode);
void _diag0c_dma(struct hypfs_diag0c_entry *entry);
void _diag308_reset_dma(void);
struct diag_ops __bootdata_preserved(diag_dma_ops) = {
	.diag210 = _diag210_dma,
	.diag26c = _diag26c_dma,
	.diag14 = _diag14_dma,
	.diag0c = _diag0c_dma,
	.diag308_reset = _diag308_reset_dma
};
static struct diag210 _diag210_tmp_dma __section(".dma.data");
struct diag210 *__bootdata_preserved(__diag210_tmp_dma) = &_diag210_tmp_dma;

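/*
 * Print an error message on the SCLP early console and stop the CPU in
 * disabled wait; there is no way to recover at this stage of the boot.
 */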
void error(char *x)
{
	sclp_early_printk("\n\n");
	sclp_early_printk(x);
	sclp_early_printk("\n\n -- System halted");

	disabled_wait();
}

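/*
 * Without a compressed payload there is no decompressor work area to protect:
 * the first address that is safe to reuse lies directly behind the loaded
 * kernel image, including its BSS. The compressed case is handled by the
 * decompressor code instead.
 */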
#ifdef CONFIG_KERNEL_UNCOMPRESSED
unsigned long mem_safe_offset(void)
{
	return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
}
#endif

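/*
 * The memory below "addr" is going to be used during decompression and early
 * setup; if the initrd starts below that address, move it out of the way so
 * it does not get overwritten.
 */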
static void rescue_initrd(unsigned long addr)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
		return;
	if (!INITRD_START || !INITRD_SIZE)
		return;
	if (addr <= INITRD_START)
		return;
	memmove((void *)addr, (void *)INITRD_START, INITRD_SIZE);
	INITRD_START = addr;
}

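/*
 * Copy the decompressor's .boot.data and .boot.preserved.data sections into
 * the (possibly relocated) decompressed kernel image. The section sizes are
 * checked against the values recorded in the vmlinux info block.
 */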
static void copy_bootdata(void)
{
	if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
		error(".boot.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
	if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
		error(".boot.preserved.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}

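/*
 * Apply the relocations from the kernel's .rela.dyn section at its final
 * load address. R_390_RELATIVE entries (symbol index 0) only get the KASLR
 * offset added; entries that reference a defined symbol also add the
 * relocated symbol value.
 */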
static void handle_relocs(unsigned long offset)
{
	Elf64_Rela *rela_start, *rela_end, *rela;
	int r_type, r_sym, rc;
	Elf64_Addr loc, val;
	Elf64_Sym *dynsym;

	rela_start = (Elf64_Rela *) vmlinux.rela_dyn_start;
	rela_end = (Elf64_Rela *) vmlinux.rela_dyn_end;
	dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
	for (rela = rela_start; rela < rela_end; rela++) {
		loc = rela->r_offset + offset;
		val = rela->r_addend;
		r_sym = ELF64_R_SYM(rela->r_info);
		if (r_sym) {
			if (dynsym[r_sym].st_shndx != SHN_UNDEF)
				val += dynsym[r_sym].st_value + offset;
		} else {
			/*
			 * 0 == undefined symbol table index (STN_UNDEF),
			 * used for R_390_RELATIVE, only add KASLR offset
			 */
			val += offset;
		}
		r_type = ELF64_R_TYPE(rela->r_info);
		rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
		if (rc)
			error("Unknown relocation type");
	}
}

/*
 * This function clears the BSS section of the decompressed Linux kernel and
 * NOT the decompressor's.
 */
static void clear_bss_section(void)
{
	memset((void *)vmlinux.default_lma + vmlinux.image_size, 0, vmlinux.bss_size);
}

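/*
 * Main C entry point of the boot stage: gather IPL, UV and memory
 * information, optionally pick a random load address (KASLR), place the
 * decompressed kernel there, fix up its boot data and relocations, and
 * finally jump to its entry point.
 */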
void startup_kernel(void)
{
	unsigned long random_lma;
	unsigned long safe_addr;
	void *img;

	store_ipl_parmblock();
	safe_addr = mem_safe_offset();
	safe_addr = read_ipl_report(safe_addr);
	uv_query_info();
	rescue_initrd(safe_addr);
	sclp_early_read_info();
	setup_boot_command_line();
	parse_boot_command_line();
	setup_memory_end();
	detect_memory();

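	/*
	 * With KASLR enabled, pick a random load address and shift all
	 * addresses recorded in the vmlinux info block by the resulting
	 * offset.
	 */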
	random_lma = __kaslr_offset = 0;
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
		random_lma = get_random_base(safe_addr);
		if (random_lma) {
			__kaslr_offset = random_lma - vmlinux.default_lma;
			img = (void *)vmlinux.default_lma;
			vmlinux.default_lma += __kaslr_offset;
			vmlinux.entry += __kaslr_offset;
			vmlinux.bootdata_off += __kaslr_offset;
			vmlinux.bootdata_preserved_off += __kaslr_offset;
			vmlinux.rela_dyn_start += __kaslr_offset;
			vmlinux.rela_dyn_end += __kaslr_offset;
			vmlinux.dynsym_start += __kaslr_offset;
		}
	}

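	/*
	 * Place the kernel image at its final (possibly randomized) address:
	 * either decompress it and move it there, or, for an uncompressed
	 * build with KASLR, copy it from its original location.
	 */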
	if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
		img = decompress_kernel();
		memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
	} else if (__kaslr_offset)
		memcpy((void *)vmlinux.default_lma, img, vmlinux.image_size);

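	/*
	 * Clear the new image's BSS, copy the boot data sections over and,
	 * for a relocatable kernel, apply its relocations.
	 */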
	clear_bss_section();
	copy_bootdata();
	if (IS_ENABLED(CONFIG_RELOCATABLE))
		handle_relocs(__kaslr_offset);

	if (__kaslr_offset) {
		/*
		 * Save the KASLR offset for early dumps, before vmcore_info
		 * is set. Mark the value as odd to distinguish it from a real
		 * vmcore_info pointer.
		 */
		S390_lowcore.vmcore_info = __kaslr_offset | 0x1UL;
		/* Clear the non-relocated kernel copy */
		if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED))
			memset(img, 0, vmlinux.image_size);
	}
	vmlinux.entry();
}