xref: /OK3568_Linux_fs/kernel/arch/x86/kvm/svm/vmenter.S (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>

#define WORD_SIZE (BITS_PER_LONG / 8)

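/*
 * Byte offsets into the GPR array pointed to by @regs: each __VCPU_REGS_*
 * index from <asm/kvm_vcpu_regs.h> is scaled by the word size.
 */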
/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	__VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX	__VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX	__VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	__VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI	__VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI	__VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8		__VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9		__VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10	__VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11	__VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12	__VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13	__VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14	__VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
#endif

.section .noinstr.text, "ax"

/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @vmcb_pa:	unsigned long (physical address of the guest's VMCB)
 * @regs:	unsigned long * (pointer to the guest GPR array)
 */
SYM_FUNC_START(__svm_vcpu_run)
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

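	/*
	 * Stash @regs and @vmcb on the stack: loading the guest's GPRs below
	 * clobbers every register, including the argument registers, so the
	 * two parameters can only be recovered by popping them back off.
	 */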
	/* Save @regs. */
	push %_ASM_ARG2

	/* Save @vmcb. */
	push %_ASM_ARG1

	/* Move @regs to RAX. */
	mov %_ASM_ARG2, %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX),  %r8
	mov VCPU_R9 (%_ASM_AX),  %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15
#endif

	/* "POP" @vmcb to RAX. */
	pop %_ASM_AX

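	/*
	 * VMLOAD, VMRUN and VMSAVE each carry an exception table entry: a
	 * fault resumes at the paired fixup label, which skips ahead when the
	 * fault is expected because KVM is shutting down (kvm_rebooting set)
	 * and otherwise BUGs via ud2.
	 */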
	/* Enter guest mode */
	sti
1:	vmload %_ASM_AX
	jmp 3f
2:	cmpb $0, kvm_rebooting
	jne 3f
	ud2
	_ASM_EXTABLE(1b, 2b)

3:	vmrun %_ASM_AX
	jmp 5f
4:	cmpb $0, kvm_rebooting
	jne 5f
	ud2
	_ASM_EXTABLE(3b, 4b)

5:	vmsave %_ASM_AX
	jmp 7f
6:	cmpb $0, kvm_rebooting
	jne 7f
	ud2
	_ASM_EXTABLE(5b, 6b)
7:
	cli

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* "POP" @regs to RAX. */
	pop %_ASM_AX

	/* Save all guest registers. */
	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize 'ret' if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

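	/* Restore the host's callee-saved registers pushed at entry. */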
	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET
SYM_FUNC_END(__svm_vcpu_run)