xref: /OK3568_Linux_fs/kernel/arch/x86/realmode/rm/trampoline_64.S (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun/* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun/*
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun *	Trampoline.S	Derived from Setup.S by Linus Torvalds
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun *	4 Jan 1997 Michael Chastain: changed to gnu as.
7*4882a593Smuzhiyun *	15 Sept 2005 Eric Biederman: 64bit PIC support
8*4882a593Smuzhiyun *
9*4882a593Smuzhiyun *	Entry: CS:IP point to the start of our code, we are
10*4882a593Smuzhiyun *	in real mode with no stack, but the rest of the
11*4882a593Smuzhiyun *	trampoline page to make our stack and everything else
12*4882a593Smuzhiyun *	is a mystery.
13*4882a593Smuzhiyun *
14*4882a593Smuzhiyun *	On entry to trampoline_start, the processor is in real mode
15*4882a593Smuzhiyun *	with 16-bit addressing and 16-bit data.  CS has some value
16*4882a593Smuzhiyun *	and IP is zero.  Thus, data addresses need to be absolute
17*4882a593Smuzhiyun *	(no relocation) and are taken with regard to r_base.
18*4882a593Smuzhiyun *
19*4882a593Smuzhiyun *	With the addition of trampoline_level4_pgt this code can
20*4882a593Smuzhiyun *	now enter a 64bit kernel that lives at arbitrary 64bit
21*4882a593Smuzhiyun *	physical addresses.
22*4882a593Smuzhiyun *
23*4882a593Smuzhiyun *	If you work on this file, check the object module with objdump
24*4882a593Smuzhiyun *	--full-contents --reloc to make sure there are no relocation
25*4882a593Smuzhiyun *	entries.
26*4882a593Smuzhiyun */
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun#include <linux/linkage.h>
29*4882a593Smuzhiyun#include <asm/pgtable_types.h>
30*4882a593Smuzhiyun#include <asm/page_types.h>
31*4882a593Smuzhiyun#include <asm/msr.h>
32*4882a593Smuzhiyun#include <asm/segment.h>
33*4882a593Smuzhiyun#include <asm/processor-flags.h>
34*4882a593Smuzhiyun#include <asm/realmode.h>
35*4882a593Smuzhiyun#include "realmode.h"
36*4882a593Smuzhiyun
37*4882a593Smuzhiyun	.text
38*4882a593Smuzhiyun	.code16
39*4882a593Smuzhiyun
40*4882a593Smuzhiyun	.balign	PAGE_SIZE
41*4882a593SmuzhiyunSYM_CODE_START(trampoline_start)
	/*
	 * 16-bit real-mode entry point for secondary CPUs (APs).
	 * Entered with CS:IP at the start of this code and no stack;
	 * everything needed lives inside the real-mode trampoline image.
	 */
42*4882a593Smuzhiyun	cli			# We should be safe anyway
43*4882a593Smuzhiyun	wbinvd			# flush caches before we start touching memory
44*4882a593Smuzhiyun
	# Far jump to reload %cs with the trampoline segment value.
45*4882a593Smuzhiyun	LJMPW_RM(1f)
46*4882a593Smuzhiyun1:
47*4882a593Smuzhiyun	mov	%cs, %ax	# Code and data in the same place
48*4882a593Smuzhiyun	mov	%ax, %ds
49*4882a593Smuzhiyun	mov	%ax, %es
50*4882a593Smuzhiyun	mov	%ax, %ss
51*4882a593Smuzhiyun
52*4882a593Smuzhiyun	# Setup stack
53*4882a593Smuzhiyun	movl	$rm_stack_end, %esp
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun	call	verify_cpu		# Verify the cpu supports long mode
56*4882a593Smuzhiyun	testl   %eax, %eax		# Check for return code (0 == long mode OK)
57*4882a593Smuzhiyun	jnz	no_longmode
58*4882a593Smuzhiyun
59*4882a593Smuzhiyun.Lswitch_to_protected:
60*4882a593Smuzhiyun	/*
61*4882a593Smuzhiyun	 * GDT tables in non default location kernel can be beyond 16MB and
62*4882a593Smuzhiyun	 * lgdt will not be able to load the address as in real mode default
63*4882a593Smuzhiyun	 * operand size is 16bit. Use lgdtl instead to force operand size
64*4882a593Smuzhiyun	 * to 32 bit.
65*4882a593Smuzhiyun	 */
66*4882a593Smuzhiyun
67*4882a593Smuzhiyun	lidtl	tr_idt	# load idt with 0, 0
68*4882a593Smuzhiyun	lgdtl	tr_gdt	# load gdt with whatever is appropriate
69*4882a593Smuzhiyun
	# %dx is carried into startup_32, which uses it to load the
	# protected-mode data segment registers (%ss/%ds/%es/%fs/%gs).
70*4882a593Smuzhiyun	movw	$__KERNEL_DS, %dx	# Data segment descriptor
71*4882a593Smuzhiyun
72*4882a593Smuzhiyun	# Enable protected mode
73*4882a593Smuzhiyun	movl	$X86_CR0_PE, %eax	# protected mode (PE) bit
74*4882a593Smuzhiyun	movl	%eax, %cr0		# into protected mode
75*4882a593Smuzhiyun
76*4882a593Smuzhiyun	# flush prefetch and jump to startup_32
77*4882a593Smuzhiyun	ljmpl	$__KERNEL32_CS, $pa_startup_32
78*4882a593Smuzhiyun
	# CPU lacks long mode: park it.  Loop in case hlt returns
	# (e.g. on a spurious wakeup).
79*4882a593Smuzhiyunno_longmode:
80*4882a593Smuzhiyun	hlt
81*4882a593Smuzhiyun	jmp no_longmode
82*4882a593SmuzhiyunSYM_CODE_END(trampoline_start)
83*4882a593Smuzhiyun
84*4882a593Smuzhiyun#ifdef CONFIG_AMD_MEM_ENCRYPT
85*4882a593Smuzhiyun/* SEV-ES supports non-zero IP for entry points - no alignment needed */
86*4882a593SmuzhiyunSYM_CODE_START(sev_es_trampoline_start)
	/*
	 * Alternate real-mode entry used for SEV-ES guests.  Identical
	 * segment/stack set-up to trampoline_start, then joins the common
	 * protected-mode switch.  Note: no wbinvd here, unlike
	 * trampoline_start — presumably unsafe/unneeded under SEV-ES
	 * (NOTE(review): confirm against the SEV-ES #VC handling rules).
	 */
87*4882a593Smuzhiyun	cli			# We should be safe anyway
88*4882a593Smuzhiyun
89*4882a593Smuzhiyun	LJMPW_RM(1f)
90*4882a593Smuzhiyun1:
91*4882a593Smuzhiyun	mov	%cs, %ax	# Code and data in the same place
92*4882a593Smuzhiyun	mov	%ax, %ds
93*4882a593Smuzhiyun	mov	%ax, %es
94*4882a593Smuzhiyun	mov	%ax, %ss
95*4882a593Smuzhiyun
96*4882a593Smuzhiyun	# Setup stack
97*4882a593Smuzhiyun	movl	$rm_stack_end, %esp
98*4882a593Smuzhiyun
	# Join the common path in trampoline_start; skips verify_cpu.
99*4882a593Smuzhiyun	jmp	.Lswitch_to_protected
100*4882a593SmuzhiyunSYM_CODE_END(sev_es_trampoline_start)
101*4882a593Smuzhiyun#endif	/* CONFIG_AMD_MEM_ENCRYPT */
102*4882a593Smuzhiyun
103*4882a593Smuzhiyun#include "../kernel/verify_cpu.S"
104*4882a593Smuzhiyun
105*4882a593Smuzhiyun	.section ".text32","ax"
106*4882a593Smuzhiyun	.code32
107*4882a593Smuzhiyun	.balign 4
108*4882a593SmuzhiyunSYM_CODE_START(startup_32)
	/*
	 * 32-bit protected-mode stage.  Entered via the ljmpl in
	 * trampoline_start with %edx = __KERNEL_DS (set there before the
	 * mode switch).  Enables PAE, loads the trampoline page tables,
	 * programs EFER, turns on paging and jumps to 64-bit code.
	 */
109*4882a593Smuzhiyun	movl	%edx, %ss
110*4882a593Smuzhiyun	addl	$pa_real_mode_base, %esp	# rebase stack to flat address space
111*4882a593Smuzhiyun	movl	%edx, %ds
112*4882a593Smuzhiyun	movl	%edx, %es
113*4882a593Smuzhiyun	movl	%edx, %fs
114*4882a593Smuzhiyun	movl	%edx, %gs
115*4882a593Smuzhiyun
116*4882a593Smuzhiyun	/*
117*4882a593Smuzhiyun	 * Check for memory encryption support. This is a safety net in
118*4882a593Smuzhiyun	 * case BIOS hasn't done the necessary step of setting the bit in
119*4882a593Smuzhiyun	 * the MSR for this AP. If SME is active and we've gotten this far
120*4882a593Smuzhiyun	 * then it is safe for us to set the MSR bit and continue. If we
121*4882a593Smuzhiyun	 * don't we'll eventually crash trying to execute encrypted
122*4882a593Smuzhiyun	 * instructions.
123*4882a593Smuzhiyun	 */
124*4882a593Smuzhiyun	btl	$TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
125*4882a593Smuzhiyun	jnc	.Ldone			# SME not active: nothing to do
126*4882a593Smuzhiyun	movl	$MSR_K8_SYSCFG, %ecx
127*4882a593Smuzhiyun	rdmsr
128*4882a593Smuzhiyun	bts	$MSR_K8_SYSCFG_MEM_ENCRYPT_BIT, %eax
129*4882a593Smuzhiyun	jc	.Ldone			# bit was already set: skip the write
130*4882a593Smuzhiyun
131*4882a593Smuzhiyun	/*
132*4882a593Smuzhiyun	 * Memory encryption is enabled but the SME enable bit for this
133*4882a593Smuzhiyun	 * CPU has not been set.  It is safe to set it, so do so.
134*4882a593Smuzhiyun	 */
135*4882a593Smuzhiyun	wrmsr
136*4882a593Smuzhiyun.Ldone:
137*4882a593Smuzhiyun
	# CR4 value (including PAE) is supplied by the kernel via the
	# trampoline_header (tr_cr4).
138*4882a593Smuzhiyun	movl	pa_tr_cr4, %eax
139*4882a593Smuzhiyun	movl	%eax, %cr4		# Enable PAE mode
140*4882a593Smuzhiyun
141*4882a593Smuzhiyun	# Setup trampoline 4 level pagetables
142*4882a593Smuzhiyun	movl	$pa_trampoline_pgd, %eax
143*4882a593Smuzhiyun	movl	%eax, %cr3
144*4882a593Smuzhiyun
145*4882a593Smuzhiyun	# Set up EFER
	# 64-bit tr_efer value is loaded as the %edx:%eax pair for wrmsr.
146*4882a593Smuzhiyun	movl	pa_tr_efer, %eax
147*4882a593Smuzhiyun	movl	pa_tr_efer + 4, %edx
148*4882a593Smuzhiyun	movl	$MSR_EFER, %ecx
149*4882a593Smuzhiyun	wrmsr
150*4882a593Smuzhiyun
151*4882a593Smuzhiyun	# Enable paging and in turn activate Long Mode
152*4882a593Smuzhiyun	movl	$(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
153*4882a593Smuzhiyun	movl	%eax, %cr0
154*4882a593Smuzhiyun
155*4882a593Smuzhiyun	/*
156*4882a593Smuzhiyun	 * At this point we're in long mode but in 32bit compatibility mode
157*4882a593Smuzhiyun	 * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
158*4882a593Smuzhiyun	 * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we use
159*4882a593Smuzhiyun	 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
160*4882a593Smuzhiyun	 */
161*4882a593Smuzhiyun	ljmpl	$__KERNEL_CS, $pa_startup_64
162*4882a593SmuzhiyunSYM_CODE_END(startup_32)
163*4882a593Smuzhiyun
164*4882a593Smuzhiyun	.section ".text64","ax"
165*4882a593Smuzhiyun	.code64
166*4882a593Smuzhiyun	.balign 4
167*4882a593SmuzhiyunSYM_CODE_START(startup_64)
	# 64-bit stage: indirect jump through tr_start in the
	# trampoline_header, i.e. the kernel-supplied virtual entry point.
168*4882a593Smuzhiyun	# Now jump into the kernel using virtual addresses
169*4882a593Smuzhiyun	jmpq	*tr_start(%rip)
170*4882a593SmuzhiyunSYM_CODE_END(startup_64)
171*4882a593Smuzhiyun
172*4882a593Smuzhiyun	.section ".rodata","a"
173*4882a593Smuzhiyun	# Duplicate the global descriptor table
174*4882a593Smuzhiyun	# so the kernel can live anywhere
175*4882a593Smuzhiyun	.balign	16
176*4882a593SmuzhiyunSYM_DATA_START(tr_gdt)
	# The GDT pseudo-descriptor (limit + 32-bit base) is overlaid on the
	# unused null descriptor slot, so `lgdtl tr_gdt` works directly on
	# this table.
177*4882a593Smuzhiyun	.short	tr_gdt_end - tr_gdt - 1	# gdt limit
178*4882a593Smuzhiyun	.long	pa_tr_gdt		# gdt base (physical)
179*4882a593Smuzhiyun	.short	0			# pad out the null-descriptor slot
180*4882a593Smuzhiyun	.quad	0x00cf9b000000ffff	# __KERNEL32_CS
181*4882a593Smuzhiyun	.quad	0x00af9b000000ffff	# __KERNEL_CS
182*4882a593Smuzhiyun	.quad	0x00cf93000000ffff	# __KERNEL_DS
183*4882a593SmuzhiyunSYM_DATA_END_LABEL(tr_gdt, SYM_L_LOCAL, tr_gdt_end)
184*4882a593Smuzhiyun
185*4882a593Smuzhiyun	.bss
186*4882a593Smuzhiyun	.balign	PAGE_SIZE
	# Top-level trampoline page table; loaded into %cr3 by startup_32.
187*4882a593SmuzhiyunSYM_DATA(trampoline_pgd, .space PAGE_SIZE)
188*4882a593Smuzhiyun
189*4882a593Smuzhiyun	.balign	8
190*4882a593SmuzhiyunSYM_DATA_START(trampoline_header)
	# Header filled in by the kernel before waking an AP; consumed here:
191*4882a593Smuzhiyun	SYM_DATA_LOCAL(tr_start,	.space 8)	# 64-bit kernel entry, jumped to by startup_64
192*4882a593Smuzhiyun	SYM_DATA(tr_efer,		.space 8)	# EFER value written via wrmsr in startup_32
193*4882a593Smuzhiyun	SYM_DATA(tr_cr4,		.space 4)	# CR4 value loaded in startup_32
194*4882a593Smuzhiyun	SYM_DATA(tr_flags,		.space 4)	# flags; SME-active bit tested in startup_32
195*4882a593SmuzhiyunSYM_DATA_END(trampoline_header)
196*4882a593Smuzhiyun
197*4882a593Smuzhiyun#include "trampoline_common.S"
198