/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_INSN_H
#define __KVM_X86_VMX_INSN_H

#include <linux/nospec.h>

#include <asm/kvm_host.h>
#include <asm/vmx.h>

#include "evmcs.h"
#include "vmcs.h"

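/*
 * Wrap a VMX instruction so that a fault (e.g. because VMX has already
 * been disabled for an emergency reboot) is reported via
 * kvm_spurious_fault() instead of oopsing.
 */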
#define __ex(x) __kvm_handle_fault_on_reboot(x)

asmlinkage void vmread_error(unsigned long field, bool fault);
__attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field,
							  bool fault);
void vmwrite_error(unsigned long field, unsigned long value);
void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
void invvpid_error(unsigned long ext, u16 vpid, gva_t gva);
void invept_error(unsigned long ext, u64 eptp, gpa_t gpa);

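/*
 * VMCS field encodings (Intel SDM): bits 14:13 give the field width
 * (0 = 16-bit, 1 = 64-bit, 2 = 32-bit, 3 = natural width) and bit 0
 * set selects the high 32 bits of a 64-bit field.  The checks below
 * mask with 0x6000/0x6001 to reject, at compile time for constant
 * fields, any accessor whose size doesn't match the field's width.
 */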
static __always_inline void vmcs_check16(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "16-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "16-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "16-bit accessor invalid for 32-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "16-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check32(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "32-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "32-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check64(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "64-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "64-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "64-bit accessor invalid for 32-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "64-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_checkl(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "Natural width accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "Natural width accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "Natural width accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "Natural width accessor invalid for 32-bit field");
}

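/*
 * Raw VMREAD with out-of-line error handling: VMfail falls through to
 * the error path below, a fault is routed there via the fixup section,
 * and in both cases the trampoline reports the failure and the result
 * is forced to '0'.
 */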
static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile("1: vmread %2, %1\n\t"
		     ".byte 0x3e\n\t" /* branch taken hint */
		     "ja 3f\n\t"

		     /*
		      * VMREAD failed.  Push '0' for @fault, push the failing
		      * @field, and bounce through the trampoline to preserve
		      * volatile registers.
		      */
		     "push $0\n\t"
		     "push %2\n\t"
		     "2:call vmread_error_trampoline\n\t"

		     /*
		      * Unwind the stack.  Note, the trampoline zeros out the
		      * memory for @fault so that the result is '0' on error.
		      */
		     "pop %2\n\t"
		     "pop %1\n\t"
		     "3:\n\t"

		     /* VMREAD faulted.  As above, except push '1' for @fault. */
		     ".pushsection .fixup, \"ax\"\n\t"
		     "4: push $1\n\t"
		     "push %2\n\t"
		     "jmp 2b\n\t"
		     ".popsection\n\t"
		     _ASM_EXTABLE(1b, 4b)
		     : ASM_CALL_CONSTRAINT, "=r"(value) : "r"(field) : "cc");
	return value;
}

static __always_inline u16 vmcs_read16(unsigned long field)
{
	vmcs_check16(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read16(field);
	return __vmcs_readl(field);
}

static __always_inline u32 vmcs_read32(unsigned long field)
{
	vmcs_check32(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read32(field);
	return __vmcs_readl(field);
}

static __always_inline u64 vmcs_read64(unsigned long field)
{
	vmcs_check64(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read64(field);
#ifdef CONFIG_X86_64
	return __vmcs_readl(field);
#else
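	/*
	 * On 32-bit kernels a 64-bit field is read as two halves; the
	 * high half lives at the adjacent "high" encoding (field + 1).
	 */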
	return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
#endif
}

static __always_inline unsigned long vmcs_readl(unsigned long field)
{
	vmcs_checkl(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read64(field);
	return __vmcs_readl(field);
}

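/*
 * The vmx_asm[12]() macros emit a VMX instruction with three possible
 * outcomes: success falls through, VMfail (CF or ZF set, hence "jna")
 * branches to the 'error' label to log the failure, and a fault (e.g.
 * VMX not enabled) is routed to the 'fault' label via the exception
 * table and triggers kvm_spurious_fault().
 */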
#define vmx_asm1(insn, op1, error_args...)				\
do {									\
	asm_volatile_goto("1: " __stringify(insn) " %0\n\t"		\
			  ".byte 0x2e\n\t" /* branch not taken hint */	\
			  "jna %l[error]\n\t"				\
			  _ASM_EXTABLE(1b, %l[fault])			\
			  : : op1 : "cc" : error, fault);		\
	return;								\
error:									\
	instrumentation_begin();					\
	insn##_error(error_args);					\
	instrumentation_end();						\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

#define vmx_asm2(insn, op1, op2, error_args...)				\
do {									\
	asm_volatile_goto("1: " __stringify(insn) " %1, %0\n\t"		\
			  ".byte 0x2e\n\t" /* branch not taken hint */	\
			  "jna %l[error]\n\t"				\
			  _ASM_EXTABLE(1b, %l[fault])			\
			  : : op1, op2 : "cc" : error, fault);		\
	return;								\
error:									\
	instrumentation_begin();					\
	insn##_error(error_args);					\
	instrumentation_end();						\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
{
	vmx_asm2(vmwrite, "r"(field), "rm"(value), field, value);
}

static __always_inline void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_check16(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write16(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_check32(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write64(unsigned long field, u64 value)
{
	vmcs_check64(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
#ifndef CONFIG_X86_64
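	/*
	 * 32-bit kernel: write the high half through the adjacent
	 * "high" field encoding (field + 1).
	 */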
	__vmcs_writel(field+1, value >> 32);
#endif
}

static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
{
	vmcs_checkl(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
}

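/*
 * The RMW helpers below take a 32-bit mask and so deliberately reject
 * 64-bit fields; on a 32-bit kernel the read-modify-write would also
 * only touch the low half of such a field.
 */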
static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_clear_bits does not support 64-bit fields");
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, evmcs_read32(field) & ~mask);

	__vmcs_writel(field, __vmcs_readl(field) & ~mask);
}

static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_set_bits does not support 64-bit fields");
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, evmcs_read32(field) | mask);

	__vmcs_writel(field, __vmcs_readl(field) | mask);
}

static inline void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);

	vmx_asm1(vmclear, "m"(phys_addr), vmcs, phys_addr);
}

static inline void vmcs_load(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);

	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_load(phys_addr);

	vmx_asm1(vmptrld, "m"(phys_addr), vmcs, phys_addr);
}

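/*
 * INVVPID and INVEPT take the invalidation type in a register and a
 * 16-byte descriptor in memory identifying what to flush.
 */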
static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { vpid, 0, gva };

	vmx_asm2(invvpid, "r"(ext), "m"(operand), ext, vpid, gva);
}

static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
{
	struct {
		u64 eptp, gpa;
	} operand = {eptp, gpa};

	vmx_asm2(invept, "r"(ext), "m"(operand), ext, eptp, gpa);
}

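/*
 * The vpid_sync_*() helpers flush TLB entries tagged with a guest's
 * VPID.  VPID 0 is reserved for the host and is never flushed here;
 * when the CPU lacks a narrower invalidation type, fall back to a
 * broader one.
 */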
static inline void vpid_sync_vcpu_single(int vpid)
{
	if (vpid == 0)
		return;

	__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
	__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

static inline void vpid_sync_context(int vpid)
{
	if (cpu_has_vmx_invvpid_single())
		vpid_sync_vcpu_single(vpid);
	else if (vpid != 0)
		vpid_sync_vcpu_global();
}

static inline void vpid_sync_vcpu_addr(int vpid, gva_t addr)
{
	if (vpid == 0)
		return;

	if (cpu_has_vmx_invvpid_individual_addr())
		__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
	else
		vpid_sync_context(vpid);
}

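/*
 * As with VPID, EPT invalidation falls back to a global flush when
 * single-context INVEPT isn't supported.
 */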
static inline void ept_sync_global(void)
{
	__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
	if (cpu_has_vmx_invept_context())
		__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
	else
		ept_sync_global();
}

#endif /* __KVM_X86_VMX_INSN_H */