Lines matching refs: vmx (arch/x86/kvm/vmx/vmx.c)
376 static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx) in vmx_disable_fb_clear() argument
380 if (!vmx->disable_fb_clear) in vmx_disable_fb_clear()
387 vmx->msr_ia32_mcu_opt_ctrl = msr; in vmx_disable_fb_clear()
390 static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx) in vmx_enable_fb_clear() argument
392 if (!vmx->disable_fb_clear) in vmx_enable_fb_clear()
395 vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS; in vmx_enable_fb_clear()
396 native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl); in vmx_enable_fb_clear()
399 static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx) in vmx_update_fb_clear_dis() argument
401 vmx->disable_fb_clear = vmx_fb_clear_ctrl_available; in vmx_update_fb_clear_dis()
414 vmx->disable_fb_clear = false; in vmx_update_fb_clear_dis()
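
The hits above, in vmx_disable_fb_clear()/vmx_enable_fb_clear() and vmx_update_fb_clear_dis(), toggle FB_CLEAR_DIS in MSR_IA32_MCU_OPT_CTRL before VM-entry and restore it after VM-exit. A minimal user-space sketch of that toggle pattern; the MSR write is stubbed and the bit position is illustrative, not the architectural value:

/* Model of the FB_CLEAR_DIS toggle: keep a cached copy of the MSR,
 * set the disable bit before VM-entry, clear it again after VM-exit.
 * wrmsr_stub() stands in for native_wrmsrl(); the bit is illustrative. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FB_CLEAR_DIS (1ULL << 3)   /* illustrative bit position */

struct vcpu_model {
	bool disable_fb_clear;
	uint64_t msr_mcu_opt_ctrl;     /* cached MSR_IA32_MCU_OPT_CTRL */
};

static void wrmsr_stub(uint64_t val)
{
	printf("wrmsr MCU_OPT_CTRL = %#llx\n", (unsigned long long)val);
}

static void disable_fb_clear(struct vcpu_model *v)
{
	if (!v->disable_fb_clear)
		return;
	v->msr_mcu_opt_ctrl |= FB_CLEAR_DIS;
	wrmsr_stub(v->msr_mcu_opt_ctrl);
}

static void enable_fb_clear(struct vcpu_model *v)
{
	if (!v->disable_fb_clear)
		return;
	v->msr_mcu_opt_ctrl &= ~FB_CLEAR_DIS;
	wrmsr_stub(v->msr_mcu_opt_ctrl);
}

int main(void)
{
	struct vcpu_model v = { .disable_fb_clear = true };

	disable_fb_clear(&v);   /* before VM-entry */
	enable_fb_clear(&v);    /* after VM-exit */
	return 0;
}
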
509 static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx) in vmx_segment_cache_clear() argument
511 vmx->segment_cache.bitmask = 0; in vmx_segment_cache_clear()
732 static inline int __vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr) in __vmx_find_uret_msr() argument
736 for (i = 0; i < vmx->nr_uret_msrs; ++i) in __vmx_find_uret_msr()
737 if (vmx_uret_msrs_list[vmx->guest_uret_msrs[i].slot] == msr) in __vmx_find_uret_msr()
742 struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr) in vmx_find_uret_msr() argument
746 i = __vmx_find_uret_msr(vmx, msr); in vmx_find_uret_msr()
748 return &vmx->guest_uret_msrs[i]; in vmx_find_uret_msr()
752 static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx, in vmx_set_guest_uret_msr() argument
759 if (msr - vmx->guest_uret_msrs < vmx->nr_active_uret_msrs) { in vmx_set_guest_uret_msr()
819 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg, in vmx_segment_cache_test_set() argument
825 if (!kvm_register_is_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS)) { in vmx_segment_cache_test_set()
826 kvm_register_mark_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS); in vmx_segment_cache_test_set()
827 vmx->segment_cache.bitmask = 0; in vmx_segment_cache_test_set()
829 ret = vmx->segment_cache.bitmask & mask; in vmx_segment_cache_test_set()
830 vmx->segment_cache.bitmask |= mask; in vmx_segment_cache_test_set()
834 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg) in vmx_read_guest_seg_selector() argument
836 u16 *p = &vmx->segment_cache.seg[seg].selector; in vmx_read_guest_seg_selector()
838 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL)) in vmx_read_guest_seg_selector()
843 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg) in vmx_read_guest_seg_base() argument
845 ulong *p = &vmx->segment_cache.seg[seg].base; in vmx_read_guest_seg_base()
847 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE)) in vmx_read_guest_seg_base()
852 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg) in vmx_read_guest_seg_limit() argument
854 u32 *p = &vmx->segment_cache.seg[seg].limit; in vmx_read_guest_seg_limit()
856 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT)) in vmx_read_guest_seg_limit()
861 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg) in vmx_read_guest_seg_ar() argument
863 u32 *p = &vmx->segment_cache.seg[seg].ar; in vmx_read_guest_seg_ar()
865 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR)) in vmx_read_guest_seg_ar()
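
All of the vmx_read_guest_seg_*() hits above share one pattern: vmx_segment_cache_test_set() checks a per-field bit in segment_cache.bitmask, and on a miss the caller issues the VMREAD while the bit stays set until vmx_segment_cache_clear() resets the mask. A standalone model of that test-and-set caching, with the VMREAD stubbed:

/* Standalone model of the segment-cache pattern: a per-field bitmask
 * records which cached values are valid; a miss reads the "hardware"
 * (stubbed here) exactly once until the cache is invalidated. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { SEG_FIELD_SEL = 1 << 0, SEG_FIELD_BASE = 1 << 1 };

struct seg_cache {
	unsigned int bitmask;
	uint16_t selector;
	unsigned long base;
};

static uint16_t vmcs_read_selector(void) { puts("VMREAD selector"); return 0x10; }

/* Returns true when the cached value is already valid, false on a miss. */
static bool cache_test_set(struct seg_cache *c, unsigned int mask)
{
	bool hit = c->bitmask & mask;

	c->bitmask |= mask;
	return hit;
}

static uint16_t read_selector(struct seg_cache *c)
{
	if (!cache_test_set(c, SEG_FIELD_SEL))
		c->selector = vmcs_read_selector();   /* miss: one VMREAD */
	return c->selector;
}

int main(void)
{
	struct seg_cache c = { 0 };

	read_selector(&c);   /* miss, issues the VMREAD */
	read_selector(&c);   /* hit, served from the cache */
	c.bitmask = 0;       /* vmx_segment_cache_clear() equivalent */
	read_selector(&c);   /* miss again */
	return 0;
}
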
919 static bool msr_write_intercepted(struct vcpu_vmx *vmx, u32 msr) in msr_write_intercepted() argument
924 if (!(exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS)) in msr_write_intercepted()
927 msr_bitmap = vmx->loaded_vmcs->msr_bitmap; in msr_write_intercepted()
939 unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx) in __vmx_vcpu_run_flags() argument
943 if (vmx->loaded_vmcs->launched) in __vmx_vcpu_run_flags()
951 if (unlikely(!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))) in __vmx_vcpu_run_flags()
957 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, in clear_atomic_switch_msr_special() argument
960 vm_entry_controls_clearbit(vmx, entry); in clear_atomic_switch_msr_special()
961 vm_exit_controls_clearbit(vmx, exit); in clear_atomic_switch_msr_special()
975 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) in clear_atomic_switch_msr() argument
978 struct msr_autoload *m = &vmx->msr_autoload; in clear_atomic_switch_msr()
983 clear_atomic_switch_msr_special(vmx, in clear_atomic_switch_msr()
991 clear_atomic_switch_msr_special(vmx, in clear_atomic_switch_msr()
1015 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, in add_atomic_switch_msr_special() argument
1023 vm_entry_controls_setbit(vmx, entry); in add_atomic_switch_msr_special()
1024 vm_exit_controls_setbit(vmx, exit); in add_atomic_switch_msr_special()
1027 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, in add_atomic_switch_msr() argument
1031 struct msr_autoload *m = &vmx->msr_autoload; in add_atomic_switch_msr()
1036 add_atomic_switch_msr_special(vmx, in add_atomic_switch_msr()
1047 add_atomic_switch_msr_special(vmx, in add_atomic_switch_msr()
1093 static bool update_transition_efer(struct vcpu_vmx *vmx) in update_transition_efer() argument
1095 u64 guest_efer = vmx->vcpu.arch.efer; in update_transition_efer()
1120 (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) { in update_transition_efer()
1124 add_atomic_switch_msr(vmx, MSR_EFER, in update_transition_efer()
1127 clear_atomic_switch_msr(vmx, MSR_EFER); in update_transition_efer()
1131 i = __vmx_find_uret_msr(vmx, MSR_EFER); in update_transition_efer()
1135 clear_atomic_switch_msr(vmx, MSR_EFER); in update_transition_efer()
1140 vmx->guest_uret_msrs[i].data = guest_efer; in update_transition_efer()
1141 vmx->guest_uret_msrs[i].mask = ~ignore_bits; in update_transition_efer()
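
clear_atomic_switch_msr() and add_atomic_switch_msr() above maintain the VM-entry/VM-exit MSR-load lists hanging off vmx->msr_autoload, and update_transition_efer() uses them when EFER has to be switched atomically rather than lazily. A sketch of the list bookkeeping (find, append or update, swap-remove); the VMCS count writes the kernel performs are reduced to a comment, and the removal detail is an assumption from the general pattern, not shown in the excerpt:

/* Sketch of an MSR autoload list: parallel guest/host arrays plus a
 * count; adding finds an existing slot or appends, clearing swaps the
 * last entry into the freed slot. */
#include <stdint.h>
#include <stdio.h>

#define NR_AUTOLOAD_MSRS 8

struct autoload_entry { uint32_t index; uint64_t value; };

struct msr_autoload {
	unsigned int nr;
	struct autoload_entry guest[NR_AUTOLOAD_MSRS];
	struct autoload_entry host[NR_AUTOLOAD_MSRS];
};

static int find_msr(const struct msr_autoload *m, uint32_t msr)
{
	for (unsigned int i = 0; i < m->nr; i++)
		if (m->guest[i].index == msr)
			return (int)i;
	return -1;
}

static void add_switch_msr(struct msr_autoload *m, uint32_t msr,
			   uint64_t guest_val, uint64_t host_val)
{
	int i = find_msr(m, msr);

	if (i < 0) {
		if (m->nr == NR_AUTOLOAD_MSRS) {
			fprintf(stderr, "autoload list full\n");
			return;
		}
		i = (int)m->nr++;
		/* the kernel also writes the new count into the VMCS here */
	}
	m->guest[i] = (struct autoload_entry){ msr, guest_val };
	m->host[i]  = (struct autoload_entry){ msr, host_val };
}

static void clear_switch_msr(struct msr_autoload *m, uint32_t msr)
{
	int i = find_msr(m, msr);

	if (i < 0)
		return;
	--m->nr;
	m->guest[i] = m->guest[m->nr];   /* swap last entry into the hole */
	m->host[i]  = m->host[m->nr];
}

int main(void)
{
	struct msr_autoload m = { 0 };

	add_switch_msr(&m, 0xc0000080 /* MSR_EFER */, 0x500, 0xd01);
	clear_switch_msr(&m, 0xc0000080);
	return 0;
}
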
1175 static inline bool pt_can_write_msr(struct vcpu_vmx *vmx) in pt_can_write_msr() argument
1178 !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN); in pt_can_write_msr()
1215 static void pt_guest_enter(struct vcpu_vmx *vmx) in pt_guest_enter() argument
1224 rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); in pt_guest_enter()
1225 if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) { in pt_guest_enter()
1227 pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range); in pt_guest_enter()
1228 pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range); in pt_guest_enter()
1232 static void pt_guest_exit(struct vcpu_vmx *vmx) in pt_guest_exit() argument
1237 if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) { in pt_guest_exit()
1238 pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range); in pt_guest_exit()
1239 pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range); in pt_guest_exit()
1243 wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); in pt_guest_exit()
1275 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_prepare_switch_to_guest() local
1284 vmx->req_immediate_exit = false; in vmx_prepare_switch_to_guest()
1291 if (!vmx->guest_uret_msrs_loaded) { in vmx_prepare_switch_to_guest()
1292 vmx->guest_uret_msrs_loaded = true; in vmx_prepare_switch_to_guest()
1293 for (i = 0; i < vmx->nr_active_uret_msrs; ++i) in vmx_prepare_switch_to_guest()
1294 kvm_set_user_return_msr(vmx->guest_uret_msrs[i].slot, in vmx_prepare_switch_to_guest()
1295 vmx->guest_uret_msrs[i].data, in vmx_prepare_switch_to_guest()
1296 vmx->guest_uret_msrs[i].mask); in vmx_prepare_switch_to_guest()
1300 if (vmx->nested.need_vmcs12_to_shadow_sync) in vmx_prepare_switch_to_guest()
1303 if (vmx->guest_state_loaded) in vmx_prepare_switch_to_guest()
1306 host_state = &vmx->loaded_vmcs->host_state; in vmx_prepare_switch_to_guest()
1324 vmx->msr_host_kernel_gs_base = current->thread.gsbase; in vmx_prepare_switch_to_guest()
1329 vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE); in vmx_prepare_switch_to_guest()
1332 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); in vmx_prepare_switch_to_guest()
1341 vmx->guest_state_loaded = true; in vmx_prepare_switch_to_guest()
1344 static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx) in vmx_prepare_switch_to_host() argument
1348 if (!vmx->guest_state_loaded) in vmx_prepare_switch_to_host()
1351 host_state = &vmx->loaded_vmcs->host_state; in vmx_prepare_switch_to_host()
1353 ++vmx->vcpu.stat.host_state_reload; in vmx_prepare_switch_to_host()
1356 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); in vmx_prepare_switch_to_host()
1376 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); in vmx_prepare_switch_to_host()
1379 vmx->guest_state_loaded = false; in vmx_prepare_switch_to_host()
1380 vmx->guest_uret_msrs_loaded = false; in vmx_prepare_switch_to_host()
1384 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx) in vmx_read_guest_kernel_gs_base() argument
1387 if (vmx->guest_state_loaded) in vmx_read_guest_kernel_gs_base()
1388 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); in vmx_read_guest_kernel_gs_base()
1390 return vmx->msr_guest_kernel_gs_base; in vmx_read_guest_kernel_gs_base()
1393 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data) in vmx_write_guest_kernel_gs_base() argument
1396 if (vmx->guest_state_loaded) in vmx_write_guest_kernel_gs_base()
1399 vmx->msr_guest_kernel_gs_base = data; in vmx_write_guest_kernel_gs_base()
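
vmx_read_guest_kernel_gs_base() and vmx_write_guest_kernel_gs_base() above only touch the live MSR while guest state is loaded; otherwise the cached field is authoritative. A tiny model of that lazy-MSR pattern, with rdmsr/wrmsr stubbed:

/* Model of lazy MSR caching: the live MSR is only read or written while
 * guest state is loaded; otherwise the software copy is used. */
#include <stdbool.h>
#include <stdint.h>

static uint64_t fake_msr;                  /* stands in for MSR_KERNEL_GS_BASE */
static uint64_t rdmsr_stub(void) { return fake_msr; }
static void wrmsr_stub(uint64_t v) { fake_msr = v; }

struct vcpu_model {
	bool guest_state_loaded;
	uint64_t guest_kernel_gs_base;         /* cached copy */
};

static uint64_t read_guest_kernel_gs_base(struct vcpu_model *v)
{
	if (v->guest_state_loaded)
		v->guest_kernel_gs_base = rdmsr_stub();
	return v->guest_kernel_gs_base;
}

static void write_guest_kernel_gs_base(struct vcpu_model *v, uint64_t data)
{
	if (v->guest_state_loaded)
		wrmsr_stub(data);
	v->guest_kernel_gs_base = data;
}

int main(void)
{
	struct vcpu_model v = { .guest_state_loaded = false };

	write_guest_kernel_gs_base(&v, 0x1234);   /* only the cache is updated */
	return read_guest_kernel_gs_base(&v) == 0x1234 ? 0 : 1;
}
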
1406 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_load_vmcs() local
1407 bool already_loaded = vmx->loaded_vmcs->cpu == cpu; in vmx_vcpu_load_vmcs()
1411 loaded_vmcs_clear(vmx->loaded_vmcs); in vmx_vcpu_load_vmcs()
1422 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link, in vmx_vcpu_load_vmcs()
1428 if (prev != vmx->loaded_vmcs->vmcs) { in vmx_vcpu_load_vmcs()
1429 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs; in vmx_vcpu_load_vmcs()
1430 vmcs_load(vmx->loaded_vmcs->vmcs); in vmx_vcpu_load_vmcs()
1462 vmx->loaded_vmcs->cpu = cpu; in vmx_vcpu_load_vmcs()
1467 vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) in vmx_vcpu_load_vmcs()
1468 decache_tsc_multiplier(vmx); in vmx_vcpu_load_vmcs()
1477 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_load() local
1483 vmx->host_debugctlmsr = get_debugctlmsr(); in vmx_vcpu_load()
1500 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_rflags() local
1506 if (vmx->rmode.vm86_active) { in vmx_get_rflags()
1508 save_rflags = vmx->rmode.save_rflags; in vmx_get_rflags()
1511 vmx->rflags = rflags; in vmx_get_rflags()
1513 return vmx->rflags; in vmx_get_rflags()
1518 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_rflags() local
1523 vmx->rflags = rflags; in vmx_set_rflags()
1529 vmx->rflags = rflags; in vmx_set_rflags()
1530 if (vmx->rmode.vm86_active) { in vmx_set_rflags()
1531 vmx->rmode.save_rflags = rflags; in vmx_set_rflags()
1536 if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM) in vmx_set_rflags()
1537 vmx->emulation_required = emulation_required(vcpu); in vmx_set_rflags()
1571 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_rtit_ctl_check() local
1578 if (data & vmx->pt_desc.ctl_bitmask) in vmx_rtit_ctl_check()
1585 if ((vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) && in vmx_rtit_ctl_check()
1586 ((vmx->pt_desc.guest.ctl ^ data) & ~RTIT_CTL_TRACEEN)) in vmx_rtit_ctl_check()
1596 !intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_rtit_ctl_check()
1604 value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods); in vmx_rtit_ctl_check()
1605 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) && in vmx_rtit_ctl_check()
1609 value = intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_rtit_ctl_check()
1611 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) && in vmx_rtit_ctl_check()
1615 value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_periods); in vmx_rtit_ctl_check()
1616 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) && in vmx_rtit_ctl_check()
1626 if ((value && (vmx->pt_desc.addr_range < 1)) || (value > 2)) in vmx_rtit_ctl_check()
1629 if ((value && (vmx->pt_desc.addr_range < 2)) || (value > 2)) in vmx_rtit_ctl_check()
1632 if ((value && (vmx->pt_desc.addr_range < 3)) || (value > 2)) in vmx_rtit_ctl_check()
1635 if ((value && (vmx->pt_desc.addr_range < 4)) || (value > 2)) in vmx_rtit_ctl_check()
1690 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_update_emulated_instruction() local
1705 vmx->nested.mtf_pending = true; in vmx_update_emulated_instruction()
1707 vmx->nested.mtf_pending = false; in vmx_update_emulated_instruction()
1731 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_queue_exception() local
1754 if (vmx->rmode.vm86_active) { in vmx_queue_exception()
1762 WARN_ON_ONCE(vmx->emulation_required); in vmx_queue_exception()
1766 vmx->vcpu.arch.event_exit_inst_len); in vmx_queue_exception()
1776 static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr) in vmx_setup_uret_msr() argument
1781 from = __vmx_find_uret_msr(vmx, msr); in vmx_setup_uret_msr()
1784 to = vmx->nr_active_uret_msrs++; in vmx_setup_uret_msr()
1786 tmp = vmx->guest_uret_msrs[to]; in vmx_setup_uret_msr()
1787 vmx->guest_uret_msrs[to] = vmx->guest_uret_msrs[from]; in vmx_setup_uret_msr()
1788 vmx->guest_uret_msrs[from] = tmp; in vmx_setup_uret_msr()
1796 static void setup_msrs(struct vcpu_vmx *vmx) in setup_msrs() argument
1798 vmx->guest_uret_msrs_loaded = false; in setup_msrs()
1799 vmx->nr_active_uret_msrs = 0; in setup_msrs()
1805 if (is_long_mode(&vmx->vcpu) && (vmx->vcpu.arch.efer & EFER_SCE)) { in setup_msrs()
1806 vmx_setup_uret_msr(vmx, MSR_STAR); in setup_msrs()
1807 vmx_setup_uret_msr(vmx, MSR_LSTAR); in setup_msrs()
1808 vmx_setup_uret_msr(vmx, MSR_SYSCALL_MASK); in setup_msrs()
1811 if (update_transition_efer(vmx)) in setup_msrs()
1812 vmx_setup_uret_msr(vmx, MSR_EFER); in setup_msrs()
1814 if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP)) in setup_msrs()
1815 vmx_setup_uret_msr(vmx, MSR_TSC_AUX); in setup_msrs()
1817 vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL); in setup_msrs()
1820 vmx_update_msr_bitmap(&vmx->vcpu); in setup_msrs()
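
vmx_setup_uret_msr() above activates a user-return MSR by swapping it into slot nr_active_uret_msrs++, so setup_msrs() rebuilds a compact active prefix whenever the guest's mode or CPUID changes. A standalone sketch of that swap-to-prefix bookkeeping (the MSR index values are the usual architectural ones, used only as sample data):

/* Sketch of the "active prefix" bookkeeping for user-return MSRs:
 * activating an MSR swaps it into slot nr_active++, so the first
 * nr_active entries are exactly the MSRs that need switching. */
#include <stdint.h>
#include <stdio.h>

#define MAX_URET_MSRS 4

struct uret_msr { uint32_t index; uint64_t data; };

struct vcpu_model {
	unsigned int nr_active;
	struct uret_msr msrs[MAX_URET_MSRS];
};

static int find_uret_msr(struct vcpu_model *v, uint32_t msr)
{
	for (unsigned int i = 0; i < MAX_URET_MSRS; i++)
		if (v->msrs[i].index == msr)
			return (int)i;
	return -1;
}

static void setup_uret_msr(struct vcpu_model *v, uint32_t msr)
{
	int from = find_uret_msr(v, msr);
	unsigned int to;
	struct uret_msr tmp;

	if (from < 0)
		return;
	to = v->nr_active++;
	tmp = v->msrs[to];
	v->msrs[to] = v->msrs[from];
	v->msrs[from] = tmp;
}

int main(void)
{
	struct vcpu_model v = {
		.msrs = { { 0xc0000081 }, { 0xc0000082 }, { 0xc0000084 }, { 0xc0000080 } },
	};

	v.nr_active = 0;                    /* setup_msrs() starts from zero */
	setup_uret_msr(&v, 0xc0000080);     /* e.g. MSR_EFER */
	printf("active: %u, slot0: %#x\n", v.nr_active, v.msrs[0].index);
	return 0;
}
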
1886 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_msr() local
1899 msr_info->data = vmx_read_guest_kernel_gs_base(vmx); in vmx_get_msr()
1910 if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx)) in vmx_get_msr()
1913 msr_info->data = vmx->msr_ia32_umwait_control; in vmx_get_msr()
1940 !(vmx->msr_ia32_feature_control & in vmx_get_msr()
1946 msr_info->data = vmx->msr_ia32_feature_control; in vmx_get_msr()
1951 if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index, in vmx_get_msr()
1962 vmx->nested.enlightened_vmcs_enabled) in vmx_get_msr()
1969 msr_info->data = vmx->pt_desc.guest.ctl; in vmx_get_msr()
1974 msr_info->data = vmx->pt_desc.guest.status; in vmx_get_msr()
1978 !intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_get_msr()
1981 msr_info->data = vmx->pt_desc.guest.cr3_match; in vmx_get_msr()
1985 (!intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_get_msr()
1987 !intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_get_msr()
1990 msr_info->data = vmx->pt_desc.guest.output_base; in vmx_get_msr()
1994 (!intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_get_msr()
1996 !intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_get_msr()
1999 msr_info->data = vmx->pt_desc.guest.output_mask; in vmx_get_msr()
2004 (index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_get_msr()
2008 msr_info->data = vmx->pt_desc.guest.addr_b[index / 2]; in vmx_get_msr()
2010 msr_info->data = vmx->pt_desc.guest.addr_a[index / 2]; in vmx_get_msr()
2019 msr = vmx_find_uret_msr(vmx, msr_info->index); in vmx_get_msr()
2047 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_msr() local
2060 vmx_segment_cache_clear(vmx); in vmx_set_msr()
2064 vmx_segment_cache_clear(vmx); in vmx_set_msr()
2068 vmx_write_guest_kernel_gs_base(vmx, data); in vmx_set_msr()
2109 if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx)) in vmx_set_msr()
2116 vmx->msr_ia32_umwait_control = data; in vmx_set_msr()
2126 vmx->spec_ctrl = data; in vmx_set_msr()
2211 vmx->msr_ia32_feature_control = data; in vmx_set_msr()
2224 vmx->nested.vmxon) in vmx_set_msr()
2227 vmx->pt_desc.guest.ctl = data; in vmx_set_msr()
2231 if (!pt_can_write_msr(vmx)) in vmx_set_msr()
2235 vmx->pt_desc.guest.status = data; in vmx_set_msr()
2238 if (!pt_can_write_msr(vmx)) in vmx_set_msr()
2240 if (!intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_set_msr()
2243 vmx->pt_desc.guest.cr3_match = data; in vmx_set_msr()
2246 if (!pt_can_write_msr(vmx)) in vmx_set_msr()
2248 if (!intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_set_msr()
2250 !intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_set_msr()
2255 vmx->pt_desc.guest.output_base = data; in vmx_set_msr()
2258 if (!pt_can_write_msr(vmx)) in vmx_set_msr()
2260 if (!intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_set_msr()
2262 !intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_set_msr()
2265 vmx->pt_desc.guest.output_mask = data; in vmx_set_msr()
2268 if (!pt_can_write_msr(vmx)) in vmx_set_msr()
2271 if (index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_set_msr()
2277 vmx->pt_desc.guest.addr_b[index / 2] = data; in vmx_set_msr()
2279 vmx->pt_desc.guest.addr_a[index / 2] = data; in vmx_set_msr()
2292 msr = vmx_find_uret_msr(vmx, msr_index); in vmx_set_msr()
2294 ret = vmx_set_guest_uret_msr(vmx, msr, data); in vmx_set_msr()
2301 vmx_update_fb_clear_dis(vcpu, vmx); in vmx_set_msr()
2811 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_pmode() local
2817 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); in enter_pmode()
2818 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); in enter_pmode()
2819 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); in enter_pmode()
2820 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); in enter_pmode()
2821 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); in enter_pmode()
2822 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); in enter_pmode()
2824 vmx->rmode.vm86_active = 0; in enter_pmode()
2826 vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); in enter_pmode()
2830 flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; in enter_pmode()
2838 fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); in enter_pmode()
2839 fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); in enter_pmode()
2840 fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); in enter_pmode()
2841 fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); in enter_pmode()
2842 fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); in enter_pmode()
2843 fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); in enter_pmode()
2882 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_rmode() local
2885 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); in enter_rmode()
2886 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); in enter_rmode()
2887 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); in enter_rmode()
2888 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); in enter_rmode()
2889 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); in enter_rmode()
2890 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); in enter_rmode()
2891 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); in enter_rmode()
2893 vmx->rmode.vm86_active = 1; in enter_rmode()
2903 vmx_segment_cache_clear(vmx); in enter_rmode()
2910 vmx->rmode.save_rflags = flags; in enter_rmode()
2918 fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); in enter_rmode()
2919 fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); in enter_rmode()
2920 fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); in enter_rmode()
2921 fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); in enter_rmode()
2922 fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); in enter_rmode()
2923 fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); in enter_rmode()
2930 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_efer() local
2931 struct vmx_uret_msr *msr = vmx_find_uret_msr(vmx, MSR_EFER); in vmx_set_efer()
2946 setup_msrs(vmx); in vmx_set_efer()
2979 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_flush_tlb_all() local
2994 vpid_sync_vcpu_single(vmx->vpid); in vmx_flush_tlb_all()
2995 vpid_sync_vcpu_single(vmx->nested.vpid02); in vmx_flush_tlb_all()
3078 struct vcpu_vmx *vmx = to_vmx(vcpu); in ept_update_paging_mode_cr0() local
3084 exec_controls_setbit(vmx, CPU_BASED_CR3_LOAD_EXITING | in ept_update_paging_mode_cr0()
3090 exec_controls_clearbit(vmx, CPU_BASED_CR3_LOAD_EXITING | in ept_update_paging_mode_cr0()
3102 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_cr0() local
3111 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) in vmx_set_cr0()
3114 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE)) in vmx_set_cr0()
3136 vmx->emulation_required = emulation_required(vcpu); in vmx_set_cr0()
3214 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_cr4() local
3225 else if (vmx->rmode.vm86_active) in vmx_set_cr4()
3232 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC); in vmx_set_cr4()
3236 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_DESC); in vmx_set_cr4()
3274 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_segment() local
3277 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { in vmx_get_segment()
3278 *var = vmx->rmode.segs[seg]; in vmx_get_segment()
3280 || var->selector == vmx_read_guest_seg_selector(vmx, seg)) in vmx_get_segment()
3282 var->base = vmx_read_guest_seg_base(vmx, seg); in vmx_get_segment()
3283 var->selector = vmx_read_guest_seg_selector(vmx, seg); in vmx_get_segment()
3286 var->base = vmx_read_guest_seg_base(vmx, seg); in vmx_get_segment()
3287 var->limit = vmx_read_guest_seg_limit(vmx, seg); in vmx_get_segment()
3288 var->selector = vmx_read_guest_seg_selector(vmx, seg); in vmx_get_segment()
3289 ar = vmx_read_guest_seg_ar(vmx, seg); in vmx_get_segment()
3321 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_cpl() local
3323 if (unlikely(vmx->rmode.vm86_active)) in vmx_get_cpl()
3326 int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS); in vmx_get_cpl()
3353 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_segment() local
3356 vmx_segment_cache_clear(vmx); in vmx_set_segment()
3358 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { in vmx_set_segment()
3359 vmx->rmode.segs[seg] = *var; in vmx_set_segment()
3363 fix_rmode_seg(seg, &vmx->rmode.segs[seg]); in vmx_set_segment()
3388 vmx->emulation_required = emulation_required(vcpu); in vmx_set_segment()
3801 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_disable_intercept_for_msr() local
3802 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; in vmx_disable_intercept_for_msr()
3819 clear_bit(idx, vmx->shadow_msr_intercept.read); in vmx_disable_intercept_for_msr()
3821 clear_bit(idx, vmx->shadow_msr_intercept.write); in vmx_disable_intercept_for_msr()
3847 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_enable_intercept_for_msr() local
3848 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; in vmx_enable_intercept_for_msr()
3865 set_bit(idx, vmx->shadow_msr_intercept.read); in vmx_enable_intercept_for_msr()
3867 set_bit(idx, vmx->shadow_msr_intercept.write); in vmx_enable_intercept_for_msr()
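
vmx_disable_intercept_for_msr() and vmx_enable_intercept_for_msr() above flip a bit in the per-VMCS MSR bitmap and mirror it in shadow_msr_intercept so vmx_msr_filter_changed() can replay the decision later. A user-space sketch of the bitmap indexing, following the layout in the Intel SDM (read bits for low MSRs at offset 0x000, for high MSRs at 0x400, write bits at 0x800 and 0xc00); the byte/bit math is a model, not the kernel helpers:

/* Sketch of VMX MSR-bitmap indexing: a 4K bitmap with separate read and
 * write halves for the low (0x0000-0x1fff) and high (0xc0000000-based)
 * MSR ranges. A set bit means "intercept". */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t msr_bitmap[4096];

static void set_msr_bit(uint32_t msr, int write, int intercept)
{
	uint32_t base, bit;

	if (msr <= 0x1fff) {
		base = write ? 0x800 : 0x000;
		bit = msr;
	} else if (msr >= 0xc0000000 && msr <= 0xc0001fff) {
		base = write ? 0xc00 : 0x400;
		bit = msr & 0x1fff;
	} else {
		return;   /* out-of-range MSRs are always intercepted */
	}

	if (intercept)
		msr_bitmap[base + bit / 8] |=  (uint8_t)(1u << (bit % 8));
	else
		msr_bitmap[base + bit / 8] &= ~(uint8_t)(1u << (bit % 8));
}

int main(void)
{
	memset(msr_bitmap, 0xff, sizeof(msr_bitmap));               /* intercept everything */
	set_msr_bit(0xc0000102 /* MSR_KERNEL_GS_BASE */, 0, 0);     /* pass through reads */
	set_msr_bit(0xc0000102, 1, 0);                              /* and writes */
	printf("read byte: %#x\n", msr_bitmap[0x400 + (0x102 / 8)]);
	return 0;
}
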
3942 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_update_msr_bitmap() local
3944 u8 changed = mode ^ vmx->msr_bitmap_mode; in vmx_update_msr_bitmap()
3952 vmx->msr_bitmap_mode = mode; in vmx_update_msr_bitmap()
3957 struct vcpu_vmx *vmx = to_vmx(vcpu); in pt_update_intercept_for_msr() local
3958 bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN); in pt_update_intercept_for_msr()
3965 for (i = 0; i < vmx->pt_desc.addr_range; i++) { in pt_update_intercept_for_msr()
3973 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_guest_apic_has_interrupt() local
3980 WARN_ON_ONCE(!vmx->nested.virtual_apic_map.gfn)) in vmx_guest_apic_has_interrupt()
3985 vapic_page = vmx->nested.virtual_apic_map.hva; in vmx_guest_apic_has_interrupt()
3993 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_msr_filter_changed() local
4003 bool read = test_bit(i, vmx->shadow_msr_intercept.read); in vmx_msr_filter_changed()
4004 bool write = test_bit(i, vmx->shadow_msr_intercept.write); in vmx_msr_filter_changed()
4056 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_deliver_nested_posted_interrupt() local
4059 vector == vmx->nested.posted_intr_nv) { in vmx_deliver_nested_posted_interrupt()
4064 vmx->nested.pi_pending = true; in vmx_deliver_nested_posted_interrupt()
4082 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_deliver_posted_interrupt() local
4092 if (pi_test_and_set_pir(vector, &vmx->pi_desc)) in vmx_deliver_posted_interrupt()
4096 if (pi_test_and_set_on(&vmx->pi_desc)) in vmx_deliver_posted_interrupt()
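
vmx_deliver_posted_interrupt() above sets the vector's bit in the posted-interrupt request field and then the ON bit, sending the notification IPI only when ON transitions from clear to set. A model of that coalescing using C11 atomics in place of test_and_set_bit(); the descriptor layout is simplified:

/* Model of posted-interrupt delivery: set the vector's PIR bit, then the
 * "outstanding notification" (ON) bit; only the caller that flips ON from
 * 0 to 1 needs to send the notification IPI. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pi_desc {
	_Atomic uint64_t pir[4];    /* 256 posted-interrupt request bits */
	_Atomic uint64_t control;   /* bit 0: ON */
};

static bool test_and_set_bit64(_Atomic uint64_t *word, unsigned int bit)
{
	uint64_t mask = 1ULL << bit;

	return atomic_fetch_or(word, mask) & mask;
}

static void deliver_posted_interrupt(struct pi_desc *pi, unsigned int vector)
{
	if (test_and_set_bit64(&pi->pir[vector / 64], vector % 64))
		return;                       /* already pending */
	if (test_and_set_bit64(&pi->control, 0))
		return;                       /* ON already set, IPI already on its way */
	puts("send posted-interrupt notification IPI");
}

int main(void)
{
	struct pi_desc pi = { 0 };

	deliver_posted_interrupt(&pi, 0x40);
	deliver_posted_interrupt(&pi, 0x40);  /* second delivery is coalesced */
	return 0;
}
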
4111 void vmx_set_constant_host_state(struct vcpu_vmx *vmx) in vmx_set_constant_host_state() argument
4127 vmx->loaded_vmcs->host_state.cr3 = cr3; in vmx_set_constant_host_state()
4132 vmx->loaded_vmcs->host_state.cr4 = cr4; in vmx_set_constant_host_state()
4168 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) in set_cr4_guest_host_mask() argument
4170 struct kvm_vcpu *vcpu = &vmx->vcpu; in set_cr4_guest_host_mask()
4176 if (is_guest_mode(&vmx->vcpu)) in set_cr4_guest_host_mask()
4182 u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx) in vmx_pin_based_exec_ctrl() argument
4186 if (!kvm_vcpu_apicv_active(&vmx->vcpu)) in vmx_pin_based_exec_ctrl()
4200 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_refresh_apicv_exec_ctrl() local
4202 pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx)); in vmx_refresh_apicv_exec_ctrl()
4205 secondary_exec_controls_setbit(vmx, in vmx_refresh_apicv_exec_ctrl()
4209 secondary_exec_controls_clearbit(vmx, in vmx_refresh_apicv_exec_ctrl()
4218 u32 vmx_exec_control(struct vcpu_vmx *vmx) in vmx_exec_control() argument
4222 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT) in vmx_exec_control()
4225 if (!cpu_need_tpr_shadow(&vmx->vcpu)) { in vmx_exec_control()
4236 if (kvm_mwait_in_guest(vmx->vcpu.kvm)) in vmx_exec_control()
4239 if (kvm_hlt_in_guest(vmx->vcpu.kvm)) in vmx_exec_control()
4250 vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control, in vmx_adjust_secondary_exec_control() argument
4270 vmx->nested.msrs.secondary_ctls_high |= control; in vmx_adjust_secondary_exec_control()
4272 vmx->nested.msrs.secondary_ctls_high &= ~control; in vmx_adjust_secondary_exec_control()
4281 #define vmx_adjust_sec_exec_control(vmx, exec_control, name, feat_name, ctrl_name, exiting) \ argument
4286 __enabled = guest_cpuid_has(&(vmx)->vcpu, \
4288 vmx_adjust_secondary_exec_control(vmx, exec_control, \
4294 #define vmx_adjust_sec_exec_feature(vmx, exec_control, lname, uname) \ argument
4295 vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, ENABLE_##uname, false)
4297 #define vmx_adjust_sec_exec_exiting(vmx, exec_control, lname, uname) \ argument
4298 vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, uname##_EXITING, true)
4300 static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx) in vmx_compute_secondary_exec_control() argument
4302 struct kvm_vcpu *vcpu = &vmx->vcpu; in vmx_compute_secondary_exec_control()
4310 if (vmx->vpid == 0) in vmx_compute_secondary_exec_control()
4318 if (kvm_pause_in_guest(vmx->vcpu.kvm)) in vmx_compute_secondary_exec_control()
4348 vmx_adjust_secondary_exec_control(vmx, &exec_control, in vmx_compute_secondary_exec_control()
4353 vmx_adjust_sec_exec_feature(vmx, &exec_control, rdtscp, RDTSCP); in vmx_compute_secondary_exec_control()
4363 vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID); in vmx_compute_secondary_exec_control()
4366 vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND); in vmx_compute_secondary_exec_control()
4367 vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdseed, RDSEED); in vmx_compute_secondary_exec_control()
4369 vmx_adjust_sec_exec_control(vmx, &exec_control, waitpkg, WAITPKG, in vmx_compute_secondary_exec_control()
4372 vmx->secondary_exec_control = exec_control; in vmx_compute_secondary_exec_control()
4390 static void init_vmcs(struct vcpu_vmx *vmx) in init_vmcs() argument
4396 vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap)); in init_vmcs()
4401 pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx)); in init_vmcs()
4403 exec_controls_set(vmx, vmx_exec_control(vmx)); in init_vmcs()
4406 vmx_compute_secondary_exec_control(vmx); in init_vmcs()
4407 secondary_exec_controls_set(vmx, vmx->secondary_exec_control); in init_vmcs()
4410 if (kvm_vcpu_apicv_active(&vmx->vcpu)) { in init_vmcs()
4419 vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc))); in init_vmcs()
4422 if (!kvm_pause_in_guest(vmx->vcpu.kvm)) { in init_vmcs()
4424 vmx->ple_window = ple_window; in init_vmcs()
4425 vmx->ple_window_dirty = true; in init_vmcs()
4434 vmx_set_constant_host_state(vmx); in init_vmcs()
4443 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); in init_vmcs()
4445 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); in init_vmcs()
4448 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); in init_vmcs()
4450 vm_exit_controls_set(vmx, vmx_vmexit_ctrl()); in init_vmcs()
4453 vm_entry_controls_set(vmx, vmx_vmentry_ctrl()); in init_vmcs()
4455 vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS; in init_vmcs()
4456 vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits); in init_vmcs()
4458 set_cr4_guest_host_mask(vmx); in init_vmcs()
4460 if (vmx->vpid != 0) in init_vmcs()
4461 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); in init_vmcs()
4467 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); in init_vmcs()
4475 memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc)); in init_vmcs()
4477 vmx->pt_desc.guest.output_mask = 0x7F; in init_vmcs()
4484 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_reset() local
4488 vmx->rmode.vm86_active = 0; in vmx_vcpu_reset()
4489 vmx->spec_ctrl = 0; in vmx_vcpu_reset()
4491 vmx->msr_ia32_umwait_control = 0; in vmx_vcpu_reset()
4493 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); in vmx_vcpu_reset()
4494 vmx->hv_deadline_tsc = -1; in vmx_vcpu_reset()
4506 vmx_segment_cache_clear(vmx); in vmx_vcpu_reset()
4550 setup_msrs(vmx); in vmx_vcpu_reset()
4565 vmx->vcpu.arch.cr0 = cr0; in vmx_vcpu_reset()
4572 vpid_sync_context(vmx->vpid); in vmx_vcpu_reset()
4576 vmx_update_fb_clear_dis(vcpu, vmx); in vmx_vcpu_reset()
4597 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_inject_irq() local
4604 if (vmx->rmode.vm86_active) { in vmx_inject_irq()
4615 vmx->vcpu.arch.event_exit_inst_len); in vmx_inject_irq()
4625 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_inject_nmi() local
4636 vmx->loaded_vmcs->soft_vnmi_blocked = 1; in vmx_inject_nmi()
4637 vmx->loaded_vmcs->vnmi_blocked_time = 0; in vmx_inject_nmi()
4641 vmx->loaded_vmcs->nmi_known_unmasked = false; in vmx_inject_nmi()
4643 if (vmx->rmode.vm86_active) { in vmx_inject_nmi()
4656 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_nmi_mask() local
4660 return vmx->loaded_vmcs->soft_vnmi_blocked; in vmx_get_nmi_mask()
4661 if (vmx->loaded_vmcs->nmi_known_unmasked) in vmx_get_nmi_mask()
4664 vmx->loaded_vmcs->nmi_known_unmasked = !masked; in vmx_get_nmi_mask()
4670 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_nmi_mask() local
4673 if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) { in vmx_set_nmi_mask()
4674 vmx->loaded_vmcs->soft_vnmi_blocked = masked; in vmx_set_nmi_mask()
4675 vmx->loaded_vmcs->vnmi_blocked_time = 0; in vmx_set_nmi_mask()
4678 vmx->loaded_vmcs->nmi_known_unmasked = !masked; in vmx_set_nmi_mask()
4865 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_exception_nmi() local
4871 vect_info = vmx->idt_vectoring_info; in handle_exception_nmi()
4884 if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) { in handle_exception_nmi()
4932 if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no)) in handle_exception_nmi()
4980 vmx->vcpu.arch.event_exit_inst_len = in handle_exception_nmi()
5373 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_task_switch() local
5380 idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); in handle_task_switch()
5381 idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK); in handle_task_switch()
5382 type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK); in handle_task_switch()
5398 if (vmx->idt_vectoring_info & in handle_task_switch()
5514 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invalid_guest_state() local
5518 intr_window_requested = exec_controls_get(vmx) & in handle_invalid_guest_state()
5521 while (vmx->emulation_required && count-- != 0) { in handle_invalid_guest_state()
5523 return handle_interrupt_window(&vmx->vcpu); in handle_invalid_guest_state()
5531 if (vmx->emulation_required && !vmx->rmode.vm86_active && in handle_invalid_guest_state()
5559 struct vcpu_vmx *vmx = to_vmx(vcpu); in grow_ple_window() local
5560 unsigned int old = vmx->ple_window; in grow_ple_window()
5562 vmx->ple_window = __grow_ple_window(old, ple_window, in grow_ple_window()
5566 if (vmx->ple_window != old) { in grow_ple_window()
5567 vmx->ple_window_dirty = true; in grow_ple_window()
5569 vmx->ple_window, old); in grow_ple_window()
5575 struct vcpu_vmx *vmx = to_vmx(vcpu); in shrink_ple_window() local
5576 unsigned int old = vmx->ple_window; in shrink_ple_window()
5578 vmx->ple_window = __shrink_ple_window(old, ple_window, in shrink_ple_window()
5582 if (vmx->ple_window != old) { in shrink_ple_window()
5583 vmx->ple_window_dirty = true; in shrink_ple_window()
5585 vmx->ple_window, old); in shrink_ple_window()
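
grow_ple_window() and shrink_ple_window() above recompute vmx->ple_window and set ple_window_dirty so vmx_vcpu_run() rewrites PLE_WINDOW on the next entry. The __grow_ple_window()/__shrink_ple_window() helpers are outside this excerpt, so the arithmetic below is only an illustrative stand-in for such a policy, not the kernel's exact formula:

/* Illustrative grow/shrink policy for the PLE window; the dirty flag is
 * what tells the run loop to rewrite the PLE_WINDOW VMCS field. */
#include <stdbool.h>
#include <stdio.h>

struct ple_state {
	unsigned int window;
	bool dirty;   /* tells the run loop to rewrite PLE_WINDOW */
};

static unsigned int clamp_u(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static void grow_ple_window(struct ple_state *s, unsigned int grow, unsigned int max)
{
	unsigned int old = s->window;

	s->window = clamp_u(old * grow, old, max);
	if (s->window != old)
		s->dirty = true;
}

static void shrink_ple_window(struct ple_state *s, unsigned int shrink, unsigned int min)
{
	unsigned int old = s->window;

	s->window = clamp_u(old / shrink, min, old);
	if (s->window != old)
		s->dirty = true;
}

int main(void)
{
	struct ple_state s = { .window = 4096 };

	grow_ple_window(&s, 2, 65536);
	shrink_ple_window(&s, 2, 4096);
	printf("window=%u dirty=%d\n", s.window, s.dirty);
	return 0;
}
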
5709 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_fastpath_preemption_timer() local
5711 if (!vmx->req_immediate_exit && in handle_fastpath_preemption_timer()
5712 !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) { in handle_fastpath_preemption_timer()
5811 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_exit_info() local
5814 if (!(vmx->exit_reason.failed_vmentry)) { in vmx_get_exit_info()
5815 *info2 = vmx->idt_vectoring_info; in vmx_get_exit_info()
5828 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx) in vmx_destroy_pml_buffer() argument
5830 if (vmx->pml_pg) { in vmx_destroy_pml_buffer()
5831 __free_page(vmx->pml_pg); in vmx_destroy_pml_buffer()
5832 vmx->pml_pg = NULL; in vmx_destroy_pml_buffer()
5838 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_flush_pml_buffer() local
5854 pml_buf = page_address(vmx->pml_pg); in vmx_flush_pml_buffer()
6054 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_handle_exit() local
6055 union vmx_exit_reason exit_reason = vmx->exit_reason; in vmx_handle_exit()
6056 u32 vectoring_info = vmx->idt_vectoring_info; in vmx_handle_exit()
6075 WARN_ON_ONCE(vmx->nested.nested_run_pending); in vmx_handle_exit()
6078 if (vmx->emulation_required) in vmx_handle_exit()
6108 if (unlikely(vmx->fail)) { in vmx_handle_exit()
6147 vmx->loaded_vmcs->soft_vnmi_blocked)) { in vmx_handle_exit()
6149 vmx->loaded_vmcs->soft_vnmi_blocked = 0; in vmx_handle_exit()
6150 } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL && in vmx_handle_exit()
6161 vmx->loaded_vmcs->soft_vnmi_blocked = 0; in vmx_handle_exit()
6293 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_virtual_apic_mode() local
6305 vmx->nested.change_vmcs01_virtual_apic_mode = true; in vmx_set_virtual_apic_mode()
6309 sec_exec_control = secondary_exec_controls_get(vmx); in vmx_set_virtual_apic_mode()
6339 secondary_exec_controls_set(vmx, sec_exec_control); in vmx_set_virtual_apic_mode()
6422 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_sync_pir_to_irr() local
6427 if (pi_test_on(&vmx->pi_desc)) { in vmx_sync_pir_to_irr()
6428 pi_clear_on(&vmx->pi_desc); in vmx_sync_pir_to_irr()
6435 kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr); in vmx_sync_pir_to_irr()
6466 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_apicv_post_state_restore() local
6468 pi_clear_on(&vmx->pi_desc); in vmx_apicv_post_state_restore()
6469 memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir)); in vmx_apicv_post_state_restore()
6482 static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx) in handle_exception_nmi_irqoff() argument
6485 u32 intr_info = vmx_get_intr_info(&vmx->vcpu); in handle_exception_nmi_irqoff()
6489 vmx->vcpu.arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags(); in handle_exception_nmi_irqoff()
6495 handle_interrupt_nmi_irqoff(&vmx->vcpu, nmi_entry); in handle_exception_nmi_irqoff()
6513 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_handle_exit_irqoff() local
6515 if (vmx->emulation_required) in vmx_handle_exit_irqoff()
6518 if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT) in vmx_handle_exit_irqoff()
6520 else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI) in vmx_handle_exit_irqoff()
6521 handle_exception_nmi_irqoff(vmx); in vmx_handle_exit_irqoff()
6543 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) in vmx_recover_nmi_blocking() argument
6550 idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; in vmx_recover_nmi_blocking()
6553 if (vmx->loaded_vmcs->nmi_known_unmasked) in vmx_recover_nmi_blocking()
6556 exit_intr_info = vmx_get_intr_info(&vmx->vcpu); in vmx_recover_nmi_blocking()
6574 vmx->loaded_vmcs->nmi_known_unmasked = in vmx_recover_nmi_blocking()
6577 } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked)) in vmx_recover_nmi_blocking()
6578 vmx->loaded_vmcs->vnmi_blocked_time += in vmx_recover_nmi_blocking()
6580 vmx->loaded_vmcs->entry_time)); in vmx_recover_nmi_blocking()
6637 static void vmx_complete_interrupts(struct vcpu_vmx *vmx) in vmx_complete_interrupts() argument
6639 __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info, in vmx_complete_interrupts()
6654 static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) in atomic_switch_perf_msrs() argument
6666 clear_atomic_switch_msr(vmx, msrs[i].msr); in atomic_switch_perf_msrs()
6668 add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest, in atomic_switch_perf_msrs()
6674 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_update_hv_timer() local
6678 if (vmx->req_immediate_exit) { in vmx_update_hv_timer()
6680 vmx->loaded_vmcs->hv_timer_soft_disabled = false; in vmx_update_hv_timer()
6681 } else if (vmx->hv_deadline_tsc != -1) { in vmx_update_hv_timer()
6683 if (vmx->hv_deadline_tsc > tscl) in vmx_update_hv_timer()
6685 delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >> in vmx_update_hv_timer()
6691 vmx->loaded_vmcs->hv_timer_soft_disabled = false; in vmx_update_hv_timer()
6692 } else if (!vmx->loaded_vmcs->hv_timer_soft_disabled) { in vmx_update_hv_timer()
6694 vmx->loaded_vmcs->hv_timer_soft_disabled = true; in vmx_update_hv_timer()
6698 void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp) in vmx_update_host_rsp() argument
6700 if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) { in vmx_update_host_rsp()
6701 vmx->loaded_vmcs->host_state.rsp = host_rsp; in vmx_update_host_rsp()
6706 void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, in vmx_spec_ctrl_restore_host() argument
6715 vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL); in vmx_spec_ctrl_restore_host()
6725 vmx->spec_ctrl != hostval) in vmx_spec_ctrl_restore_host()
6744 struct vcpu_vmx *vmx, in vmx_vcpu_enter_exit() argument
6776 vmx_disable_fb_clear(vmx); in vmx_vcpu_enter_exit()
6781 vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, in vmx_vcpu_enter_exit()
6786 vmx_enable_fb_clear(vmx); in vmx_vcpu_enter_exit()
6811 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_run() local
6817 vmx->loaded_vmcs->soft_vnmi_blocked)) in vmx_vcpu_run()
6818 vmx->loaded_vmcs->entry_time = ktime_get(); in vmx_vcpu_run()
6822 if (vmx->emulation_required) in vmx_vcpu_run()
6825 if (vmx->ple_window_dirty) { in vmx_vcpu_run()
6826 vmx->ple_window_dirty = false; in vmx_vcpu_run()
6827 vmcs_write32(PLE_WINDOW, vmx->ple_window); in vmx_vcpu_run()
6834 WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync); in vmx_vcpu_run()
6842 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { in vmx_vcpu_run()
6844 vmx->loaded_vmcs->host_state.cr3 = cr3; in vmx_vcpu_run()
6848 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { in vmx_vcpu_run()
6850 vmx->loaded_vmcs->host_state.cr4 = cr4; in vmx_vcpu_run()
6863 pt_guest_enter(vmx); in vmx_vcpu_run()
6865 atomic_switch_perf_msrs(vmx); in vmx_vcpu_run()
6878 x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0); in vmx_vcpu_run()
6881 vmx_vcpu_enter_exit(vcpu, vmx, __vmx_vcpu_run_flags(vmx)); in vmx_vcpu_run()
6892 if (vmx->host_debugctlmsr) in vmx_vcpu_run()
6893 update_debugctlmsr(vmx->host_debugctlmsr); in vmx_vcpu_run()
6910 pt_guest_exit(vmx); in vmx_vcpu_run()
6914 vmx->nested.nested_run_pending = 0; in vmx_vcpu_run()
6915 vmx->idt_vectoring_info = 0; in vmx_vcpu_run()
6917 if (unlikely(vmx->fail)) { in vmx_vcpu_run()
6918 vmx->exit_reason.full = 0xdead; in vmx_vcpu_run()
6922 vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON); in vmx_vcpu_run()
6923 if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY)) in vmx_vcpu_run()
6926 trace_kvm_exit(vmx->exit_reason.full, vcpu, KVM_ISA_VMX); in vmx_vcpu_run()
6928 if (unlikely(vmx->exit_reason.failed_vmentry)) in vmx_vcpu_run()
6931 vmx->loaded_vmcs->launched = 1; in vmx_vcpu_run()
6932 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); in vmx_vcpu_run()
6934 vmx_recover_nmi_blocking(vmx); in vmx_vcpu_run()
6935 vmx_complete_interrupts(vmx); in vmx_vcpu_run()
6960 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_free_vcpu() local
6963 vmx_destroy_pml_buffer(vmx); in vmx_free_vcpu()
6964 free_vpid(vmx->vpid); in vmx_free_vcpu()
6966 free_loaded_vmcs(vmx->loaded_vmcs); in vmx_free_vcpu()
6971 struct vcpu_vmx *vmx; in vmx_create_vcpu() local
6975 vmx = to_vmx(vcpu); in vmx_create_vcpu()
6979 vmx->vpid = allocate_vpid(); in vmx_create_vcpu()
6988 vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); in vmx_create_vcpu()
6989 if (!vmx->pml_pg) in vmx_create_vcpu()
6997 int j = vmx->nr_uret_msrs; in vmx_create_vcpu()
7002 vmx->guest_uret_msrs[j].slot = i; in vmx_create_vcpu()
7003 vmx->guest_uret_msrs[j].data = 0; in vmx_create_vcpu()
7018 vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR; in vmx_create_vcpu()
7020 vmx->guest_uret_msrs[j].mask = 0; in vmx_create_vcpu()
7023 vmx->guest_uret_msrs[j].mask = -1ull; in vmx_create_vcpu()
7026 ++vmx->nr_uret_msrs; in vmx_create_vcpu()
7029 err = alloc_loaded_vmcs(&vmx->vmcs01); in vmx_create_vcpu()
7034 bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS); in vmx_create_vcpu()
7035 bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS); in vmx_create_vcpu()
7052 vmx->msr_bitmap_mode = 0; in vmx_create_vcpu()
7054 vmx->loaded_vmcs = &vmx->vmcs01; in vmx_create_vcpu()
7058 init_vmcs(vmx); in vmx_create_vcpu()
7074 memcpy(&vmx->nested.msrs, &vmcs_config.nested, sizeof(vmx->nested.msrs)); in vmx_create_vcpu()
7076 memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs)); in vmx_create_vcpu()
7078 vmx->nested.posted_intr_nv = -1; in vmx_create_vcpu()
7079 vmx->nested.current_vmptr = -1ull; in vmx_create_vcpu()
7082 vmx->msr_ia32_feature_control_valid_bits = FEAT_CTL_LOCKED; in vmx_create_vcpu()
7088 vmx->pi_desc.nv = POSTED_INTR_VECTOR; in vmx_create_vcpu()
7089 vmx->pi_desc.sn = 1; in vmx_create_vcpu()
7091 vmx->ept_pointer = INVALID_PAGE; in vmx_create_vcpu()
7096 free_loaded_vmcs(vmx->loaded_vmcs); in vmx_create_vcpu()
7098 vmx_destroy_pml_buffer(vmx); in vmx_create_vcpu()
7100 free_vpid(vmx->vpid); in vmx_create_vcpu()
7213 static void vmcs_set_secondary_exec_control(struct vcpu_vmx *vmx) in vmcs_set_secondary_exec_control() argument
7227 u32 new_ctl = vmx->secondary_exec_control; in vmcs_set_secondary_exec_control()
7228 u32 cur_ctl = secondary_exec_controls_get(vmx); in vmcs_set_secondary_exec_control()
7230 secondary_exec_controls_set(vmx, (new_ctl & ~mask) | (cur_ctl & mask)); in vmcs_set_secondary_exec_control()
7239 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_cr_fixed1_bits_update() local
7242 vmx->nested.msrs.cr0_fixed1 = 0xffffffff; in nested_vmx_cr_fixed1_bits_update()
7243 vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE; in nested_vmx_cr_fixed1_bits_update()
7247 vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask); \ in nested_vmx_cr_fixed1_bits_update()
7279 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_entry_exit_ctls_update() local
7285 vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; in nested_vmx_entry_exit_ctls_update()
7286 vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; in nested_vmx_entry_exit_ctls_update()
7288 vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS; in nested_vmx_entry_exit_ctls_update()
7289 vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS; in nested_vmx_entry_exit_ctls_update()
7296 struct vcpu_vmx *vmx = to_vmx(vcpu); in update_intel_pt_cfg() local
7304 vmx->pt_desc.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM] = best->eax; in update_intel_pt_cfg()
7305 vmx->pt_desc.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM] = best->ebx; in update_intel_pt_cfg()
7306 vmx->pt_desc.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM] = best->ecx; in update_intel_pt_cfg()
7307 vmx->pt_desc.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM] = best->edx; in update_intel_pt_cfg()
7311 vmx->pt_desc.addr_range = intel_pt_validate_cap(vmx->pt_desc.caps, in update_intel_pt_cfg()
7315 vmx->pt_desc.ctl_bitmask = ~(RTIT_CTL_TRACEEN | RTIT_CTL_OS | in update_intel_pt_cfg()
7322 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_cr3_filtering)) in update_intel_pt_cfg()
7323 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_CR3EN; in update_intel_pt_cfg()
7329 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc)) in update_intel_pt_cfg()
7330 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_CYCLEACC | in update_intel_pt_cfg()
7337 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc)) in update_intel_pt_cfg()
7338 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_MTC_EN | in update_intel_pt_cfg()
7342 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_ptwrite)) in update_intel_pt_cfg()
7343 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_FUP_ON_PTW | in update_intel_pt_cfg()
7347 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_power_event_trace)) in update_intel_pt_cfg()
7348 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_PWR_EVT_EN; in update_intel_pt_cfg()
7351 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output)) in update_intel_pt_cfg()
7352 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA; in update_intel_pt_cfg()
7355 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys)) in update_intel_pt_cfg()
7356 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN; in update_intel_pt_cfg()
7359 for (i = 0; i < vmx->pt_desc.addr_range; i++) in update_intel_pt_cfg()
7360 vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4)); in update_intel_pt_cfg()
7365 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_after_set_cpuid() local
7371 vmx_compute_secondary_exec_control(vmx); in vmx_vcpu_after_set_cpuid()
7372 vmcs_set_secondary_exec_control(vmx); in vmx_vcpu_after_set_cpuid()
7395 msr = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL); in vmx_vcpu_after_set_cpuid()
7398 vmx_set_guest_uret_msr(vmx, msr, enabled ? 0 : TSX_CTRL_RTM_DISABLE); in vmx_vcpu_after_set_cpuid()
7402 set_cr4_guest_host_mask(vmx); in vmx_vcpu_after_set_cpuid()
7552 struct vcpu_vmx *vmx; in vmx_set_hv_timer() local
7556 vmx = to_vmx(vcpu); in vmx_set_hv_timer()
7584 vmx->hv_deadline_tsc = tscl + delta_tsc; in vmx_set_hv_timer()
7666 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_pre_enter_smm() local
7668 vmx->nested.smm.guest_mode = is_guest_mode(vcpu); in vmx_pre_enter_smm()
7669 if (vmx->nested.smm.guest_mode) in vmx_pre_enter_smm()
7672 vmx->nested.smm.vmxon = vmx->nested.vmxon; in vmx_pre_enter_smm()
7673 vmx->nested.vmxon = false; in vmx_pre_enter_smm()
7680 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_pre_leave_smm() local
7683 if (vmx->nested.smm.vmxon) { in vmx_pre_leave_smm()
7684 vmx->nested.vmxon = true; in vmx_pre_leave_smm()
7685 vmx->nested.smm.vmxon = false; in vmx_pre_leave_smm()
7688 if (vmx->nested.smm.guest_mode) { in vmx_pre_leave_smm()
7693 vmx->nested.smm.guest_mode = false; in vmx_pre_leave_smm()