1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef __KVM_X86_VMX_VMCS_H
3*4882a593Smuzhiyun #define __KVM_X86_VMX_VMCS_H
4*4882a593Smuzhiyun
5*4882a593Smuzhiyun #include <linux/ktime.h>
6*4882a593Smuzhiyun #include <linux/list.h>
7*4882a593Smuzhiyun #include <linux/nospec.h>
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #include <asm/kvm.h>
10*4882a593Smuzhiyun #include <asm/vmx.h>
11*4882a593Smuzhiyun
12*4882a593Smuzhiyun #include "capabilities.h"
13*4882a593Smuzhiyun
/*
 * Header dword at the start of a VMCS region: bits 30:0 hold the VMCS
 * revision identifier, bit 31 marks a shadow VMCS.
 */
struct vmcs_hdr {
	u32 revision_id:31;	/* VMCS revision identifier */
	u32 shadow_vmcs:1;	/* set iff this is a shadow VMCS */
};
18*4882a593Smuzhiyun
/*
 * Software view of the start of a VMCS region: revision header, VMX-abort
 * indicator, then opaque implementation-specific data.
 */
struct vmcs {
	struct vmcs_hdr hdr;
	u32 abort;	/* VMX-abort indicator */
	char data[];	/* CPU-implementation-defined contents */
};
24*4882a593Smuzhiyun
/* The VMCS that is currently loaded on each CPU, if any. */
DECLARE_PER_CPU(struct vmcs *, current_vmcs);
26*4882a593Smuzhiyun
/*
 * vmcs_host_state tracks registers that are loaded from the VMCS on VMEXIT
 * and whose values change infrequently, but are not constant.  I.e. this is
 * used as a write-through cache of the corresponding VMCS fields.
 */
struct vmcs_host_state {
	unsigned long cr3;	/* May not match real cr3 */
	unsigned long cr4;	/* May not match real cr4 */
	unsigned long gs_base;
	unsigned long fs_base;
	unsigned long rsp;

	u16           fs_sel, gs_sel, ldt_sel;
#ifdef CONFIG_X86_64
	/* ds/es selectors are only tracked on 64-bit kernels. */
	u16           ds_sel, es_sel;
#endif
};
44*4882a593Smuzhiyun
/* Software-cached ("shadow") copies of the VMCS control fields. */
struct vmcs_controls_shadow {
	u32 vm_entry;
	u32 vm_exit;
	u32 pin;
	u32 exec;
	u32 secondary_exec;
};
52*4882a593Smuzhiyun
/*
 * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
 * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
 * loaded on this CPU (so we can clear them if the CPU goes down).
 */
struct loaded_vmcs {
	struct vmcs *vmcs;
	struct vmcs *shadow_vmcs;
	int cpu;		/* CPU this VMCS is loaded on, or -1 */
	bool launched;		/* has this VMCS been VMLAUNCHed? */
	bool nmi_known_unmasked;
	bool hv_timer_soft_disabled;
	/* Support for vnmi-less CPUs */
	int soft_vnmi_blocked;
	ktime_t entry_time;
	s64 vnmi_blocked_time;
	unsigned long *msr_bitmap;
	/* Links into the per-CPU list of loaded VMCSs. */
	struct list_head loaded_vmcss_on_cpu_link;
	struct vmcs_host_state host_state;
	struct vmcs_controls_shadow controls_shadow;
};
74*4882a593Smuzhiyun
is_intr_type(u32 intr_info,u32 type)75*4882a593Smuzhiyun static inline bool is_intr_type(u32 intr_info, u32 type)
76*4882a593Smuzhiyun {
77*4882a593Smuzhiyun const u32 mask = INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK;
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun return (intr_info & mask) == (INTR_INFO_VALID_MASK | type);
80*4882a593Smuzhiyun }
81*4882a593Smuzhiyun
is_intr_type_n(u32 intr_info,u32 type,u8 vector)82*4882a593Smuzhiyun static inline bool is_intr_type_n(u32 intr_info, u32 type, u8 vector)
83*4882a593Smuzhiyun {
84*4882a593Smuzhiyun const u32 mask = INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK |
85*4882a593Smuzhiyun INTR_INFO_VECTOR_MASK;
86*4882a593Smuzhiyun
87*4882a593Smuzhiyun return (intr_info & mask) == (INTR_INFO_VALID_MASK | type | vector);
88*4882a593Smuzhiyun }
89*4882a593Smuzhiyun
/* True if intr_info is a valid hardware exception with the given vector. */
static inline bool is_exception_n(u32 intr_info, u8 vector)
{
	return is_intr_type_n(intr_info, INTR_TYPE_HARD_EXCEPTION, vector);
}
94*4882a593Smuzhiyun
/* Debug exception (#DB). */
static inline bool is_debug(u32 intr_info)
{
	return is_exception_n(intr_info, DB_VECTOR);
}
99*4882a593Smuzhiyun
/* Breakpoint exception (#BP). */
static inline bool is_breakpoint(u32 intr_info)
{
	return is_exception_n(intr_info, BP_VECTOR);
}
104*4882a593Smuzhiyun
/* Double fault (#DF). */
static inline bool is_double_fault(u32 intr_info)
{
	return is_exception_n(intr_info, DF_VECTOR);
}
109*4882a593Smuzhiyun
/* Page fault (#PF). */
static inline bool is_page_fault(u32 intr_info)
{
	return is_exception_n(intr_info, PF_VECTOR);
}
114*4882a593Smuzhiyun
/* Invalid opcode (#UD). */
static inline bool is_invalid_opcode(u32 intr_info)
{
	return is_exception_n(intr_info, UD_VECTOR);
}
119*4882a593Smuzhiyun
/* General protection fault (#GP). */
static inline bool is_gp_fault(u32 intr_info)
{
	return is_exception_n(intr_info, GP_VECTOR);
}
124*4882a593Smuzhiyun
/* Alignment check exception (#AC). */
static inline bool is_alignment_check(u32 intr_info)
{
	return is_exception_n(intr_info, AC_VECTOR);
}
129*4882a593Smuzhiyun
/* Machine check exception (#MC). */
static inline bool is_machine_check(u32 intr_info)
{
	return is_exception_n(intr_info, MC_VECTOR);
}
134*4882a593Smuzhiyun
/* Undocumented: icebp/int1 */
static inline bool is_icebp(u32 intr_info)
{
	return is_intr_type(intr_info, INTR_TYPE_PRIV_SW_EXCEPTION);
}
140*4882a593Smuzhiyun
/* Non-maskable interrupt. */
static inline bool is_nmi(u32 intr_info)
{
	return is_intr_type(intr_info, INTR_TYPE_NMI_INTR);
}
145*4882a593Smuzhiyun
/* External (hardware) interrupt. */
static inline bool is_external_intr(u32 intr_info)
{
	return is_intr_type(intr_info, INTR_TYPE_EXT_INTR);
}
150*4882a593Smuzhiyun
is_exception_with_error_code(u32 intr_info)151*4882a593Smuzhiyun static inline bool is_exception_with_error_code(u32 intr_info)
152*4882a593Smuzhiyun {
153*4882a593Smuzhiyun const u32 mask = INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK;
154*4882a593Smuzhiyun
155*4882a593Smuzhiyun return (intr_info & mask) == mask;
156*4882a593Smuzhiyun }
157*4882a593Smuzhiyun
/*
 * VMCS field widths, as encoded in bits 14:13 of a field encoding
 * (see vmcs_field_width() below).
 */
enum vmcs_field_width {
	VMCS_FIELD_WIDTH_U16 = 0,
	VMCS_FIELD_WIDTH_U64 = 1,
	VMCS_FIELD_WIDTH_U32 = 2,
	VMCS_FIELD_WIDTH_NATURAL_WIDTH = 3
};
164*4882a593Smuzhiyun
vmcs_field_width(unsigned long field)165*4882a593Smuzhiyun static inline int vmcs_field_width(unsigned long field)
166*4882a593Smuzhiyun {
167*4882a593Smuzhiyun if (0x1 & field) /* the *_HIGH fields are all 32 bit */
168*4882a593Smuzhiyun return VMCS_FIELD_WIDTH_U32;
169*4882a593Smuzhiyun return (field >> 13) & 0x3;
170*4882a593Smuzhiyun }
171*4882a593Smuzhiyun
/*
 * A VMCS field is read-only iff its "type", held in bits 11:10 of the
 * field encoding, is 1.
 */
static inline int vmcs_field_readonly(unsigned long field)
{
	unsigned long type = (field >> 10) & 0x3;

	return type == 1;
}
176*4882a593Smuzhiyun
177*4882a593Smuzhiyun #endif /* __KVM_X86_VMX_VMCS_H */
178