/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>

#include "capabilities.h"
#include "kvm_cache_regs.h"
#include "posted_intr.h"
#include "vmcs.h"
#include "vmx_ops.h"
#include "cpuid.h"
#include "run_flags.h"

extern const u32 vmx_msr_index[];

#define MSR_TYPE_R	1
#define MSR_TYPE_W	2
#define MSR_TYPE_RW	3

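/* Convert an xAPIC MMIO register offset to the corresponding x2APIC MSR. */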
#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))

#ifdef CONFIG_X86_64
#define MAX_NR_USER_RETURN_MSRS	7
#else
#define MAX_NR_USER_RETURN_MSRS	4
#endif

#define MAX_NR_LOADSTORE_MSRS	8

struct vmx_msrs {
	unsigned int nr;
	struct vmx_msr_entry val[MAX_NR_LOADSTORE_MSRS];
};

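/*
 * An MSR whose host and guest values differ, and which is restored to the
 * host value via the user-return notifier, i.e. on return to userspace
 * instead of on every VM-exit.
 */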
struct vmx_uret_msr {
	unsigned int slot; /* The MSR's slot in kvm_user_return_msrs. */
	u64 data;
	u64 mask;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

#define RTIT_ADDR_RANGE	4

struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
	u64 ctl_bitmask;
	u32 addr_range;
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};

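/*
 * Layout of the VM-exit reason field per the Intel SDM: bits 15:0 hold the
 * basic exit reason, bit 31 indicates a failed VM-entry.
 */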
union vmx_exit_reason {
	struct {
		u32 basic		: 16;
		u32 reserved16		: 1;
		u32 reserved17		: 1;
		u32 reserved18		: 1;
		u32 reserved19		: 1;
		u32 reserved20		: 1;
		u32 reserved21		: 1;
		u32 reserved22		: 1;
		u32 reserved23		: 1;
		u32 reserved24		: 1;
		u32 reserved25		: 1;
		u32 reserved26		: 1;
		u32 enclave_mode	: 1;
		u32 smi_pending_mtf	: 1;
		u32 smi_from_vmx_root	: 1;
		u32 reserved30		: 1;
		u32 failed_vmentry	: 1;
	};
	u32 full;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level 1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;

	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_to_shadow_sync;
	bool dirty_vmcs12;

	/*
	 * Indicates lazily loaded guest state has not yet been decached from
	 * vmcs02.
	 */
	bool need_sync_vmcs02_to_vmcs12_rare;

	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS. Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	bool change_vmcs01_virtual_apic_mode;
	bool reload_vmcs01_apic_access_page;

	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;

	/* Pending MTF VM-exit into L1. */
	bool mtf_pending;

	struct loaded_vmcs vmcs02;

	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	struct kvm_host_map virtual_apic_map;
	struct kvm_host_map pi_desc_map;

	struct kvm_host_map msr_bitmap_map;

	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;

	struct hrtimer preemption_timer;
	u64 preemption_timer_deadline;
	bool has_preemption_timer_deadline;
	bool preemption_timer_expired;

	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
	u64 vmcs01_debugctl;
	u64 vmcs01_guest_bndcfgs;

	/* to migrate it to L1 if L2 writes to L1's CR8 directly */
	int l1_tpr_threshold;

	u16 vpid02;
	u16 last_vpid;

	struct nested_vmx_msrs msrs;

	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
};

struct vcpu_vmx {
	struct kvm_vcpu vcpu;
	u8 fail;
	u8 msr_bitmap_mode;

	/*
	 * If true, host state has been stored in vmx->loaded_vmcs for
	 * the CPU registers that only need to be switched when transitioning
	 * to/from the kernel, and the registers have been loaded with guest
	 * values. If false, host state is loaded in the CPU registers
	 * and vmx->loaded_vmcs->host_state is invalid.
	 */
	bool guest_state_loaded;

	unsigned long exit_qualification;
	u32 exit_intr_info;
	u32 idt_vectoring_info;
	ulong rflags;

	struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
	int nr_uret_msrs;
	int nr_active_uret_msrs;
	bool guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
	u64 msr_host_kernel_gs_base;
	u64 msr_guest_kernel_gs_base;
#endif

	u64 spec_ctrl;
	u32 msr_ia32_umwait_control;

	u32 secondary_exec_control;

	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs vmcs01;
	struct loaded_vmcs *loaded_vmcs;

	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct msr_autostore {
		struct vmx_msrs guest;
	} msr_autostore;

	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	union vmx_exit_reason exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	unsigned int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;

	/* Support for PML */
#define PML_ENTITY_NUM	512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	u64 current_tsc_ratio;

	unsigned long host_debugctlmsr;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;
	u64 ept_pointer;
	u64 msr_ia32_mcu_opt_ctrl;
	bool disable_fb_clear;

	struct pt_desc pt_desc;

	/* Save desired MSR intercept (read: pass-through) state */
#define MAX_POSSIBLE_PASSTHROUGH_MSRS	13
	struct {
		DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
		DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
	} shadow_msr_intercept;
};

enum ept_pointers_status {
	EPT_POINTERS_CHECK = 0,
	EPT_POINTERS_MATCH = 1,
	EPT_POINTERS_MISMATCH = 2
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;

	enum ept_pointers_status ept_pointers_match;
	spinlock_t ept_pointer_lock;
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
			struct loaded_vmcs *buddy);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa,
		   int root_level);

bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
void update_exception_bitmap(struct kvm_vcpu *vcpu);
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
void vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, unsigned int flags);
unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx);
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
		    unsigned int flags);
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);

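/* Requested Virtual Interrupt (RVI) is bits 7:0 of the guest interrupt status. */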
static inline u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

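/*
 * Generate cached accessors for a VMCS control field: the current value is
 * shadowed in loaded_vmcs so that reads avoid a VMREAD, and writes perform a
 * VMWRITE only when the value actually changes.  E.g. the vm_entry
 * instantiation below yields vm_entry_controls_set/get/setbit/clearbit(), so
 * vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE) touches the VMCS only
 * if the bit was previously clear.
 */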
#define BUILD_CONTROLS_SHADOW(lname, uname) \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val) \
{ \
	if (vmx->loaded_vmcs->controls_shadow.lname != val) { \
		vmcs_write32(uname, val); \
		vmx->loaded_vmcs->controls_shadow.lname = val; \
	} \
} \
static inline u32 __##lname##_controls_get(struct loaded_vmcs *vmcs) \
{ \
	return vmcs->controls_shadow.lname; \
} \
static inline u32 lname##_controls_get(struct vcpu_vmx *vmx) \
{ \
	return __##lname##_controls_get(vmx->loaded_vmcs); \
} \
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val) \
{ \
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val); \
} \
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
{ \
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val); \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)

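/*
 * After a VM-exit, state that lives in the VMCS (RIP, RSP, RFLAGS, control
 * registers, PDPTRs, segments, exit info) is stale and must be re-read on
 * demand; the GPRs were saved by software and remain available.
 */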
static inline void vmx_register_cache_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
				  | (1 << VCPU_EXREG_RFLAGS)
				  | (1 << VCPU_EXREG_PDPTR)
				  | (1 << VCPU_EXREG_SEGMENTS)
				  | (1 << VCPU_EXREG_CR0)
				  | (1 << VCPU_EXREG_CR3)
				  | (1 << VCPU_EXREG_CR4)
				  | (1 << VCPU_EXREG_EXIT_INFO_1)
				  | (1 << VCPU_EXREG_EXIT_INFO_2));
	vcpu->arch.regs_dirty = 0;
}

static inline u32 vmx_vmentry_ctrl(void)
{
	u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;

	if (vmx_pt_mode_is_system())
		vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
				  VM_ENTRY_LOAD_IA32_RTIT_CTL);
	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
	return vmentry_ctrl &
		~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER);
}

static inline u32 vmx_vmexit_ctrl(void)
{
	u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;

	if (vmx_pt_mode_is_system())
		vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
				 VM_EXIT_CLEAR_IA32_RTIT_CTL);
	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
	return vmexit_ctrl &
		~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
}

u32 vmx_exec_control(struct vcpu_vmx *vmx);
u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx);

static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

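/*
 * The exit qualification and VM-exit interrupt info are read from the VMCS
 * lazily, on first use after a VM-exit, and cached until the register cache
 * is reset by vmx_register_cache_reset().
 */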
static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
		vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	}
	return vmx->exit_qualification;
}

static inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_2)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
		vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
	}
	return vmx->exit_intr_info;
}

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
			      GFP_KERNEL_ACCOUNT);
}

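/* Propagate the vCPU's current TSC scaling ratio into the active VMCS. */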
static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
{
	vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
	vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
}

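/*
 * WAITPKG (UMONITOR/UMWAIT/TPAUSE) is exposed to the guest iff the "enable
 * user wait and pause" secondary execution control is set.
 */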
static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
	return secondary_exec_controls_get(vmx) &
		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}

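/*
 * Without EPT, #PF must always be intercepted for shadow paging.  With EPT,
 * intercepting #PF is needed only to emulate a guest MAXPHYADDR that is
 * smaller than the host's.
 */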
static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{
	if (!enable_ept)
		return true;

	return allow_smaller_maxphyaddr &&
	       cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
}

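/*
 * The enable_unrestricted_guest module param governs vmcs01; while L2 is
 * active, unrestricted guest is in effect only if it is also enabled in the
 * current (vmcs02) secondary execution controls.
 */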
static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
{
	return enable_unrestricted_guest && (!is_guest_mode(vcpu) ||
	    (secondary_exec_controls_get(to_vmx(vcpu)) &
	     SECONDARY_EXEC_UNRESTRICTED_GUEST));
}

bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
{
	return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
}

void dump_vmcs(void);

#endif /* __KVM_X86_VMX_H */