Lines Matching refs:vmx

189 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_fail() local
195 if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs) in nested_vmx_fail()
218 static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx) in vmx_disable_shadow_vmcs() argument
220 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS); in vmx_disable_shadow_vmcs()
222 vmx->nested.need_vmcs12_to_shadow_sync = false; in vmx_disable_shadow_vmcs()
227 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_release_evmcs() local
229 if (!vmx->nested.hv_evmcs) in nested_release_evmcs()
232 kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true); in nested_release_evmcs()
233 vmx->nested.hv_evmcs_vmptr = 0; in nested_release_evmcs()
234 vmx->nested.hv_evmcs = NULL; in nested_release_evmcs()
237 static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx, in vmx_sync_vmcs_host_state() argument
242 if (unlikely(!vmx->guest_state_loaded)) in vmx_sync_vmcs_host_state()
246 dest = &vmx->loaded_vmcs->host_state; in vmx_sync_vmcs_host_state()
258 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_switch_vmcs() local
262 if (WARN_ON_ONCE(vmx->loaded_vmcs == vmcs)) in vmx_switch_vmcs()
266 prev = vmx->loaded_vmcs; in vmx_switch_vmcs()
267 vmx->loaded_vmcs = vmcs; in vmx_switch_vmcs()
269 vmx_sync_vmcs_host_state(vmx, prev); in vmx_switch_vmcs()
281 struct vcpu_vmx *vmx = to_vmx(vcpu); in free_nested() local
283 if (WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01)) in free_nested()
284 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in free_nested()
286 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) in free_nested()
291 vmx->nested.vmxon = false; in free_nested()
292 vmx->nested.smm.vmxon = false; in free_nested()
293 free_vpid(vmx->nested.vpid02); in free_nested()
294 vmx->nested.posted_intr_nv = -1; in free_nested()
295 vmx->nested.current_vmptr = -1ull; in free_nested()
297 vmx_disable_shadow_vmcs(vmx); in free_nested()
298 vmcs_clear(vmx->vmcs01.shadow_vmcs); in free_nested()
299 free_vmcs(vmx->vmcs01.shadow_vmcs); in free_nested()
300 vmx->vmcs01.shadow_vmcs = NULL; in free_nested()
302 kfree(vmx->nested.cached_vmcs12); in free_nested()
303 vmx->nested.cached_vmcs12 = NULL; in free_nested()
304 kfree(vmx->nested.cached_shadow_vmcs12); in free_nested()
305 vmx->nested.cached_shadow_vmcs12 = NULL; in free_nested()
307 if (vmx->nested.apic_access_page) { in free_nested()
308 kvm_release_page_clean(vmx->nested.apic_access_page); in free_nested()
309 vmx->nested.apic_access_page = NULL; in free_nested()
311 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true); in free_nested()
312 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true); in free_nested()
313 vmx->nested.pi_desc = NULL; in free_nested()
319 free_loaded_vmcs(&vmx->nested.vmcs02); in free_nested()
337 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_ept_inject_page_fault() local
341 if (vmx->nested.pml_full) { in nested_ept_inject_page_fault()
343 vmx->nested.pml_full = false; in nested_ept_inject_page_fault()
685 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_flush_cached_shadow_vmcs12() local
691 kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer, in nested_flush_cached_shadow_vmcs12()
881 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_max_atomic_switch_msrs() local
882 u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low, in nested_vmx_max_atomic_switch_msrs()
883 vmx->nested.msrs.misc_high); in nested_vmx_max_atomic_switch_msrs()
937 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_get_vmexit_msr_value() local
945 int i = vmx_find_loadstore_msr_slot(&vmx->msr_autostore.guest, in nested_vmx_get_vmexit_msr_value()
949 u64 val = vmx->msr_autostore.guest.val[i].value; in nested_vmx_get_vmexit_msr_value()
1035 struct vcpu_vmx *vmx = to_vmx(vcpu); in prepare_vmx_msr_autostore_list() local
1036 struct vmx_msrs *autostore = &vmx->msr_autostore.guest; in prepare_vmx_msr_autostore_list()
1193 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_transition_tlb_flush() local
1227 vmcs12->virtual_processor_id != vmx->nested.last_vpid) { in nested_vmx_transition_tlb_flush()
1228 vmx->nested.last_vpid = vmcs12->virtual_processor_id; in nested_vmx_transition_tlb_flush()
1241 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data) in vmx_restore_vmx_basic() argument
1267 vmx->nested.msrs.basic = data; in vmx_restore_vmx_basic()
1301 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) in vmx_restore_control_msr() argument
1318 vmx_get_control_msr(&vmx->nested.msrs, msr_index, &lowp, &highp); in vmx_restore_control_msr()
1324 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data) in vmx_restore_vmx_misc() argument
1338 if ((vmx->nested.msrs.pinbased_ctls_high & in vmx_restore_vmx_misc()
1353 vmx->nested.msrs.misc_low = data; in vmx_restore_vmx_misc()
1354 vmx->nested.msrs.misc_high = data >> 32; in vmx_restore_vmx_misc()
1359 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data) in vmx_restore_vmx_ept_vpid_cap() argument
1368 vmx->nested.msrs.ept_caps = data; in vmx_restore_vmx_ept_vpid_cap()
1369 vmx->nested.msrs.vpid_caps = data >> 32; in vmx_restore_vmx_ept_vpid_cap()
1385 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) in vmx_restore_fixed0_msr() argument
1396 *vmx_get_fixed0_msr(&vmx->nested.msrs, msr_index) = data; in vmx_restore_fixed0_msr()
1407 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_vmx_msr() local
1413 if (vmx->nested.vmxon) in vmx_set_vmx_msr()
1418 return vmx_restore_vmx_basic(vmx, data); in vmx_set_vmx_msr()
1438 return vmx_restore_control_msr(vmx, msr_index, data); in vmx_set_vmx_msr()
1440 return vmx_restore_vmx_misc(vmx, data); in vmx_set_vmx_msr()
1443 return vmx_restore_fixed0_msr(vmx, msr_index, data); in vmx_set_vmx_msr()
1452 return vmx_restore_vmx_ept_vpid_cap(vmx, data); in vmx_set_vmx_msr()
1454 vmx->nested.msrs.vmcs_enum = data; in vmx_set_vmx_msr()
1459 vmx->nested.msrs.vmfunc_controls = data; in vmx_set_vmx_msr()
1555 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) in copy_shadow_to_vmcs12() argument
1557 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; in copy_shadow_to_vmcs12()
1558 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu); in copy_shadow_to_vmcs12()
1577 vmcs_load(vmx->loaded_vmcs->vmcs); in copy_shadow_to_vmcs12()
1582 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) in copy_vmcs12_to_shadow() argument
1592 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; in copy_vmcs12_to_shadow()
1593 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu); in copy_vmcs12_to_shadow()
1613 vmcs_load(vmx->loaded_vmcs->vmcs); in copy_vmcs12_to_shadow()
1616 static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx) in copy_enlightened_to_vmcs12() argument
1618 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; in copy_enlightened_to_vmcs12()
1619 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; in copy_enlightened_to_vmcs12()
1832 static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx) in copy_vmcs12_to_enlightened() argument
1834 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; in copy_vmcs12_to_enlightened()
1835 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; in copy_vmcs12_to_enlightened()
2002 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_handle_enlightened_vmptrld() local
2006 if (likely(!vmx->nested.enlightened_vmcs_enabled)) in nested_vmx_handle_enlightened_vmptrld()
2012 if (unlikely(!vmx->nested.hv_evmcs || in nested_vmx_handle_enlightened_vmptrld()
2013 evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) { in nested_vmx_handle_enlightened_vmptrld()
2014 if (!vmx->nested.hv_evmcs) in nested_vmx_handle_enlightened_vmptrld()
2015 vmx->nested.current_vmptr = -1ull; in nested_vmx_handle_enlightened_vmptrld()
2020 &vmx->nested.hv_evmcs_map)) in nested_vmx_handle_enlightened_vmptrld()
2023 vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva; in nested_vmx_handle_enlightened_vmptrld()
2047 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) && in nested_vmx_handle_enlightened_vmptrld()
2048 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) { in nested_vmx_handle_enlightened_vmptrld()
2053 vmx->nested.dirty_vmcs12 = true; in nested_vmx_handle_enlightened_vmptrld()
2054 vmx->nested.hv_evmcs_vmptr = evmcs_gpa; in nested_vmx_handle_enlightened_vmptrld()
2076 vmx->nested.hv_evmcs->hv_clean_fields &= in nested_vmx_handle_enlightened_vmptrld()
2084 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_sync_vmcs12_to_shadow() local
2086 if (vmx->nested.hv_evmcs) { in nested_sync_vmcs12_to_shadow()
2087 copy_vmcs12_to_enlightened(vmx); in nested_sync_vmcs12_to_shadow()
2089 vmx->nested.hv_evmcs->hv_clean_fields |= in nested_sync_vmcs12_to_shadow()
2092 copy_vmcs12_to_shadow(vmx); in nested_sync_vmcs12_to_shadow()
2095 vmx->nested.need_vmcs12_to_shadow_sync = false; in nested_sync_vmcs12_to_shadow()
2100 struct vcpu_vmx *vmx = in vmx_preemption_timer_fn() local
2103 vmx->nested.preemption_timer_expired = true; in vmx_preemption_timer_fn()
2104 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); in vmx_preemption_timer_fn()
2105 kvm_vcpu_kick(&vmx->vcpu); in vmx_preemption_timer_fn()
2112 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_calc_preemption_timer_value() local
2118 if (!vmx->nested.has_preemption_timer_deadline) { in vmx_calc_preemption_timer_value()
2119 vmx->nested.preemption_timer_deadline = in vmx_calc_preemption_timer_value()
2121 vmx->nested.has_preemption_timer_deadline = true; in vmx_calc_preemption_timer_value()
2123 return vmx->nested.preemption_timer_deadline - l1_scaled_tsc; in vmx_calc_preemption_timer_value()
2129 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_start_preemption_timer() local
2136 vmx_preemption_timer_fn(&vmx->nested.preemption_timer); in vmx_start_preemption_timer()
2146 hrtimer_start(&vmx->nested.preemption_timer, in vmx_start_preemption_timer()
2151 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) in nested_vmx_calc_efer() argument
2153 if (vmx->nested.nested_run_pending && in nested_vmx_calc_efer()
2157 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME); in nested_vmx_calc_efer()
2159 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME); in nested_vmx_calc_efer()
2162 static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx) in prepare_vmcs02_constant_state() argument
2170 if (vmx->nested.vmcs02_initialized) in prepare_vmcs02_constant_state()
2172 vmx->nested.vmcs02_initialized = true; in prepare_vmcs02_constant_state()
2181 construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL)); in prepare_vmcs02_constant_state()
2191 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); in prepare_vmcs02_constant_state()
2201 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); in prepare_vmcs02_constant_state()
2213 vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val)); in prepare_vmcs02_constant_state()
2214 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); in prepare_vmcs02_constant_state()
2215 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); in prepare_vmcs02_constant_state()
2217 vmx_set_constant_host_state(vmx); in prepare_vmcs02_constant_state()
2220 static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx, in prepare_vmcs02_early_rare() argument
2223 prepare_vmcs02_constant_state(vmx); in prepare_vmcs02_early_rare()
2228 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) in prepare_vmcs02_early_rare()
2229 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); in prepare_vmcs02_early_rare()
2231 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); in prepare_vmcs02_early_rare()
2235 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs01, in prepare_vmcs02_early() argument
2239 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12); in prepare_vmcs02_early()
2241 if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) in prepare_vmcs02_early()
2242 prepare_vmcs02_early_rare(vmx, vmcs12); in prepare_vmcs02_early()
2252 vmx->nested.pi_pending = false; in prepare_vmcs02_early()
2254 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; in prepare_vmcs02_early()
2257 pin_controls_set(vmx, exec_control); in prepare_vmcs02_early()
2268 vmx->nested.l1_tpr_threshold = -1; in prepare_vmcs02_early()
2291 exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS; in prepare_vmcs02_early()
2293 exec_controls_set(vmx, exec_control); in prepare_vmcs02_early()
2338 secondary_exec_controls_set(vmx, exec_control); in prepare_vmcs02_early()
2363 vm_entry_controls_set(vmx, exec_control); in prepare_vmcs02_early()
2377 vm_exit_controls_set(vmx, exec_control); in prepare_vmcs02_early()
2382 if (vmx->nested.nested_run_pending) { in prepare_vmcs02_early()
2391 vmx->loaded_vmcs->nmi_known_unmasked = in prepare_vmcs02_early()
2398 static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) in prepare_vmcs02_rare() argument
2400 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; in prepare_vmcs02_rare()
2441 vmx->segment_cache.bitmask = 0; in prepare_vmcs02_rare()
2463 if (kvm_mpx_supported() && vmx->nested.nested_run_pending && in prepare_vmcs02_rare()
2484 if (vmx_need_pf_intercept(&vmx->vcpu)) { in prepare_vmcs02_rare()
2507 prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC); in prepare_vmcs02_rare()
2509 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr); in prepare_vmcs02_rare()
2510 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); in prepare_vmcs02_rare()
2511 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); in prepare_vmcs02_rare()
2513 set_cr4_guest_host_mask(vmx); in prepare_vmcs02_rare()
2530 struct vcpu_vmx *vmx = to_vmx(vcpu); in prepare_vmcs02() local
2531 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; in prepare_vmcs02()
2534 if (vmx->nested.dirty_vmcs12 || hv_evmcs) { in prepare_vmcs02()
2535 prepare_vmcs02_rare(vmx, vmcs12); in prepare_vmcs02()
2536 vmx->nested.dirty_vmcs12 = false; in prepare_vmcs02()
2543 if (vmx->nested.nested_run_pending && in prepare_vmcs02()
2549 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); in prepare_vmcs02()
2551 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending || in prepare_vmcs02()
2553 vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs); in prepare_vmcs02()
2564 if (vmx->nested.nested_run_pending && in prepare_vmcs02()
2569 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); in prepare_vmcs02()
2575 decache_tsc_multiplier(vmx); in prepare_vmcs02()
2596 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12); in prepare_vmcs02()
2664 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_eptp() local
2670 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))) in nested_vmx_check_eptp()
2674 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))) in nested_vmx_check_eptp()
2684 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT))) in nested_vmx_check_eptp()
2688 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT))) in nested_vmx_check_eptp()
2701 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))) in nested_vmx_check_eptp()
2714 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_check_vm_execution_controls() local
2717 vmx->nested.msrs.pinbased_ctls_low, in nested_check_vm_execution_controls()
2718 vmx->nested.msrs.pinbased_ctls_high)) || in nested_check_vm_execution_controls()
2720 vmx->nested.msrs.procbased_ctls_low, in nested_check_vm_execution_controls()
2721 vmx->nested.msrs.procbased_ctls_high))) in nested_check_vm_execution_controls()
2726 vmx->nested.msrs.secondary_ctls_low, in nested_check_vm_execution_controls()
2727 vmx->nested.msrs.secondary_ctls_high))) in nested_check_vm_execution_controls()
2754 ~vmx->nested.msrs.vmfunc_controls)) in nested_check_vm_execution_controls()
2773 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_check_vm_exit_controls() local
2776 vmx->nested.msrs.exit_ctls_low, in nested_check_vm_exit_controls()
2777 vmx->nested.msrs.exit_ctls_high)) || in nested_check_vm_exit_controls()
2790 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_check_vm_entry_controls() local
2793 vmx->nested.msrs.entry_ctls_low, in nested_check_vm_entry_controls()
2794 vmx->nested.msrs.entry_ctls_high))) in nested_check_vm_entry_controls()
3059 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_vmentry_hw() local
3066 if (vmx->msr_autoload.host.nr) in nested_vmx_check_vmentry_hw()
3068 if (vmx->msr_autoload.guest.nr) in nested_vmx_check_vmentry_hw()
3084 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { in nested_vmx_check_vmentry_hw()
3086 vmx->loaded_vmcs->host_state.cr3 = cr3; in nested_vmx_check_vmentry_hw()
3090 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { in nested_vmx_check_vmentry_hw()
3092 vmx->loaded_vmcs->host_state.cr4 = cr4; in nested_vmx_check_vmentry_hw()
3095 vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, in nested_vmx_check_vmentry_hw()
3096 __vmx_vcpu_run_flags(vmx)); in nested_vmx_check_vmentry_hw()
3098 if (vmx->msr_autoload.host.nr) in nested_vmx_check_vmentry_hw()
3099 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); in nested_vmx_check_vmentry_hw()
3100 if (vmx->msr_autoload.guest.nr) in nested_vmx_check_vmentry_hw()
3101 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); in nested_vmx_check_vmentry_hw()
3137 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_get_evmcs_page() local
3144 if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs) { in nested_get_evmcs_page()
3159 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_get_vmcs12_pages() local
3171 if (vmx->nested.apic_access_page) { /* shouldn't happen */ in nested_get_vmcs12_pages()
3172 kvm_release_page_clean(vmx->nested.apic_access_page); in nested_get_vmcs12_pages()
3173 vmx->nested.apic_access_page = NULL; in nested_get_vmcs12_pages()
3177 vmx->nested.apic_access_page = page; in nested_get_vmcs12_pages()
3178 hpa = page_to_phys(vmx->nested.apic_access_page); in nested_get_vmcs12_pages()
3192 map = &vmx->nested.virtual_apic_map; in nested_get_vmcs12_pages()
3207 exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW); in nested_get_vmcs12_pages()
3218 map = &vmx->nested.pi_desc_map; in nested_get_vmcs12_pages()
3221 vmx->nested.pi_desc = in nested_get_vmcs12_pages()
3229 exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS); in nested_get_vmcs12_pages()
3231 exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS); in nested_get_vmcs12_pages()
3258 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_write_pml_buffer() local
3264 if (WARN_ON_ONCE(vmx->nested.pml_full)) in nested_vmx_write_pml_buffer()
3276 vmx->nested.pml_full = true; in nested_vmx_write_pml_buffer()
3338 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_enter_non_root_mode() local
3351 evaluate_pending_interrupts = exec_controls_get(vmx) & in nested_vmx_enter_non_root_mode()
3356 if (!vmx->nested.nested_run_pending || in nested_vmx_enter_non_root_mode()
3358 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); in nested_vmx_enter_non_root_mode()
3360 (!vmx->nested.nested_run_pending || in nested_vmx_enter_non_root_mode()
3362 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); in nested_vmx_enter_non_root_mode()
3383 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); in nested_vmx_enter_non_root_mode()
3385 prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12); in nested_vmx_enter_non_root_mode()
3389 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in nested_vmx_enter_non_root_mode()
3394 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in nested_vmx_enter_non_root_mode()
3458 vmx->nested.preemption_timer_expired = false; in nested_vmx_enter_non_root_mode()
3483 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in nested_vmx_enter_non_root_mode()
3490 if (enable_shadow_vmcs || vmx->nested.hv_evmcs) in nested_vmx_enter_non_root_mode()
3491 vmx->nested.need_vmcs12_to_shadow_sync = true; in nested_vmx_enter_non_root_mode()
3503 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_run() local
3518 if (CC(!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)) in nested_vmx_run()
3532 if (vmx->nested.hv_evmcs) { in nested_vmx_run()
3533 copy_enlightened_to_vmcs12(vmx); in nested_vmx_run()
3537 copy_shadow_to_vmcs12(vmx); in nested_vmx_run()
3571 vmx->nested.nested_run_pending = 1; in nested_vmx_run()
3572 vmx->nested.has_preemption_timer_deadline = false; in nested_vmx_run()
3579 kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) { in nested_vmx_run()
3580 vmx->nested.pi_pending = true; in nested_vmx_run()
3582 kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv); in nested_vmx_run()
3586 vmx->vcpu.arch.l1tf_flush_l1d = true; in nested_vmx_run()
3610 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3616 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3752 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_complete_nested_posted_interrupt() local
3757 if (!vmx->nested.pi_desc || !vmx->nested.pi_pending) in vmx_complete_nested_posted_interrupt()
3760 vmx->nested.pi_pending = false; in vmx_complete_nested_posted_interrupt()
3761 if (!pi_test_and_clear_on(vmx->nested.pi_desc)) in vmx_complete_nested_posted_interrupt()
3764 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256); in vmx_complete_nested_posted_interrupt()
3766 vapic_page = vmx->nested.virtual_apic_map.hva; in vmx_complete_nested_posted_interrupt()
3770 __kvm_apic_update_irr(vmx->nested.pi_desc->pir, in vmx_complete_nested_posted_interrupt()
3851 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_check_nested_events() local
3854 vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu); in vmx_check_nested_events()
3855 bool mtf_pending = vmx->nested.mtf_pending; in vmx_check_nested_events()
3863 vmx->nested.mtf_pending = false; in vmx_check_nested_events()
4013 struct vcpu_vmx *vmx = to_vmx(vcpu); in sync_vmcs02_to_vmcs12_rare() local
4054 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false; in sync_vmcs02_to_vmcs12_rare()
4060 struct vcpu_vmx *vmx = to_vmx(vcpu); in copy_vmcs02_to_vmcs12_rare() local
4063 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare) in copy_vmcs02_to_vmcs12_rare()
4067 WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01); in copy_vmcs02_to_vmcs12_rare()
4070 vmx->loaded_vmcs = &vmx->nested.vmcs02; in copy_vmcs02_to_vmcs12_rare()
4071 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->vmcs01); in copy_vmcs02_to_vmcs12_rare()
4075 vmx->loaded_vmcs = &vmx->vmcs01; in copy_vmcs02_to_vmcs12_rare()
4076 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->nested.vmcs02); in copy_vmcs02_to_vmcs12_rare()
4088 struct vcpu_vmx *vmx = to_vmx(vcpu); in sync_vmcs02_to_vmcs12() local
4090 if (vmx->nested.hv_evmcs) in sync_vmcs02_to_vmcs12()
4093 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = !vmx->nested.hv_evmcs; in sync_vmcs02_to_vmcs12()
4115 !vmx->nested.nested_run_pending) in sync_vmcs02_to_vmcs12()
4342 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx) in nested_vmx_get_vmcs01_guest_efer() argument
4347 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER) in nested_vmx_get_vmcs01_guest_efer()
4353 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) { in nested_vmx_get_vmcs01_guest_efer()
4354 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER) in nested_vmx_get_vmcs01_guest_efer()
4355 return vmx->msr_autoload.guest.val[i].value; in nested_vmx_get_vmcs01_guest_efer()
4358 efer_msr = vmx_find_uret_msr(vmx, MSR_EFER); in nested_vmx_get_vmcs01_guest_efer()
4368 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_restore_host_state() local
4392 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx)); in nested_vmx_restore_host_state()
4481 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_vmexit() local
4485 WARN_ON_ONCE(vmx->nested.nested_run_pending); in nested_vmx_vmexit()
4517 if (likely(!vmx->fail)) { in nested_vmx_vmexit()
4557 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in nested_vmx_vmexit()
4560 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); in nested_vmx_vmexit()
4561 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); in nested_vmx_vmexit()
4563 if (vmx->nested.l1_tpr_threshold != -1) in nested_vmx_vmexit()
4564 vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold); in nested_vmx_vmexit()
4567 decache_tsc_multiplier(vmx); in nested_vmx_vmexit()
4569 if (vmx->nested.change_vmcs01_virtual_apic_mode) { in nested_vmx_vmexit()
4570 vmx->nested.change_vmcs01_virtual_apic_mode = false; in nested_vmx_vmexit()
4575 if (vmx->nested.apic_access_page) { in nested_vmx_vmexit()
4576 kvm_release_page_clean(vmx->nested.apic_access_page); in nested_vmx_vmexit()
4577 vmx->nested.apic_access_page = NULL; in nested_vmx_vmexit()
4579 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true); in nested_vmx_vmexit()
4580 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true); in nested_vmx_vmexit()
4581 vmx->nested.pi_desc = NULL; in nested_vmx_vmexit()
4583 if (vmx->nested.reload_vmcs01_apic_access_page) { in nested_vmx_vmexit()
4584 vmx->nested.reload_vmcs01_apic_access_page = false; in nested_vmx_vmexit()
4589 (enable_shadow_vmcs || vmx->nested.hv_evmcs)) in nested_vmx_vmexit()
4590 vmx->nested.need_vmcs12_to_shadow_sync = true; in nested_vmx_vmexit()
4595 if (likely(!vmx->fail)) { in nested_vmx_vmexit()
4634 vmx->fail = 0; in nested_vmx_vmexit()
4769 struct vcpu_vmx *vmx; in nested_vmx_pmu_entry_exit_ctls_update() local
4774 vmx = to_vmx(vcpu); in nested_vmx_pmu_entry_exit_ctls_update()
4776 vmx->nested.msrs.entry_ctls_high |= in nested_vmx_pmu_entry_exit_ctls_update()
4778 vmx->nested.msrs.exit_ctls_high |= in nested_vmx_pmu_entry_exit_ctls_update()
4781 vmx->nested.msrs.entry_ctls_high &= in nested_vmx_pmu_entry_exit_ctls_update()
4783 vmx->nested.msrs.exit_ctls_high &= in nested_vmx_pmu_entry_exit_ctls_update()
4818 struct vcpu_vmx *vmx = to_vmx(vcpu); in alloc_shadow_vmcs() local
4819 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs; in alloc_shadow_vmcs()
4827 WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs); in alloc_shadow_vmcs()
4839 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_vmx_operation() local
4842 r = alloc_loaded_vmcs(&vmx->nested.vmcs02); in enter_vmx_operation()
4846 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); in enter_vmx_operation()
4847 if (!vmx->nested.cached_vmcs12) in enter_vmx_operation()
4850 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); in enter_vmx_operation()
4851 if (!vmx->nested.cached_shadow_vmcs12) in enter_vmx_operation()
4857 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, in enter_vmx_operation()
4859 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; in enter_vmx_operation()
4861 vmx->nested.vpid02 = allocate_vpid(); in enter_vmx_operation()
4863 vmx->nested.vmcs02_initialized = false; in enter_vmx_operation()
4864 vmx->nested.vmxon = true; in enter_vmx_operation()
4867 vmx->pt_desc.guest.ctl = 0; in enter_vmx_operation()
4874 kfree(vmx->nested.cached_shadow_vmcs12); in enter_vmx_operation()
4877 kfree(vmx->nested.cached_vmcs12); in enter_vmx_operation()
4880 free_loaded_vmcs(&vmx->nested.vmcs02); in enter_vmx_operation()
4899 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmon() local
4928 if (vmx->nested.vmxon) in handle_vmon()
4931 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) in handle_vmon()
4955 vmx->nested.vmxon_ptr = vmptr; in handle_vmon()
4965 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_release_vmcs12() local
4967 if (vmx->nested.current_vmptr == -1ull) in nested_release_vmcs12()
4975 copy_shadow_to_vmcs12(vmx); in nested_release_vmcs12()
4976 vmx_disable_shadow_vmcs(vmx); in nested_release_vmcs12()
4978 vmx->nested.posted_intr_nv = -1; in nested_release_vmcs12()
4982 vmx->nested.current_vmptr >> PAGE_SHIFT, in nested_release_vmcs12()
4983 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); in nested_release_vmcs12()
4987 vmx->nested.current_vmptr = -1ull; in nested_release_vmcs12()
5007 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmclear() local
5022 if (vmptr == vmx->nested.vmxon_ptr) in handle_vmclear()
5035 if (likely(!vmx->nested.enlightened_vmcs_enabled || in handle_vmclear()
5037 if (vmptr == vmx->nested.current_vmptr) in handle_vmclear()
5068 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmread() local
5083 if (vmx->nested.current_vmptr == -1ull || in handle_vmread()
5152 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmwrite() local
5175 if (vmx->nested.current_vmptr == -1ull || in handle_vmwrite()
5239 vmcs_load(vmx->vmcs01.shadow_vmcs); in handle_vmwrite()
5243 vmcs_clear(vmx->vmcs01.shadow_vmcs); in handle_vmwrite()
5244 vmcs_load(vmx->loaded_vmcs->vmcs); in handle_vmwrite()
5247 vmx->nested.dirty_vmcs12 = true; in handle_vmwrite()
5253 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr) in set_current_vmptr() argument
5255 vmx->nested.current_vmptr = vmptr; in set_current_vmptr()
5257 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS); in set_current_vmptr()
5259 __pa(vmx->vmcs01.shadow_vmcs)); in set_current_vmptr()
5260 vmx->nested.need_vmcs12_to_shadow_sync = true; in set_current_vmptr()
5262 vmx->nested.dirty_vmcs12 = true; in set_current_vmptr()
5268 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmptrld() local
5281 if (vmptr == vmx->nested.vmxon_ptr) in handle_vmptrld()
5285 if (vmx->nested.hv_evmcs) in handle_vmptrld()
5288 if (vmx->nested.current_vmptr != vmptr) { in handle_vmptrld()
5319 memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE); in handle_vmptrld()
5322 set_current_vmptr(vmx, vmptr); in handle_vmptrld()
5367 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invept() local
5378 if (!(vmx->nested.msrs.secondary_ctls_high & in handle_invept()
5380 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) { in handle_invept()
5391 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; in handle_invept()
5446 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invvpid() local
5458 if (!(vmx->nested.msrs.secondary_ctls_high & in handle_invvpid()
5460 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) { in handle_invvpid()
5471 types = (vmx->nested.msrs.vpid_caps & in handle_invvpid()
5568 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmfunc() local
5602 nested_vmx_vmexit(vcpu, vmx->exit_reason.full, in handle_vmfunc()
6011 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_reflect_vmexit() local
6012 union vmx_exit_reason exit_reason = vmx->exit_reason; in nested_vmx_reflect_vmexit()
6016 WARN_ON_ONCE(vmx->nested.nested_run_pending); in nested_vmx_reflect_vmexit()
6022 if (unlikely(vmx->fail)) { in nested_vmx_reflect_vmexit()
6065 struct vcpu_vmx *vmx; in vmx_get_nested_state() local
6071 .hdr.vmx.flags = 0, in vmx_get_nested_state()
6072 .hdr.vmx.vmxon_pa = -1ull, in vmx_get_nested_state()
6073 .hdr.vmx.vmcs12_pa = -1ull, in vmx_get_nested_state()
6074 .hdr.vmx.preemption_timer_deadline = 0, in vmx_get_nested_state()
6077 &user_kvm_nested_state->data.vmx[0]; in vmx_get_nested_state()
6082 vmx = to_vmx(vcpu); in vmx_get_nested_state()
6086 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { in vmx_get_nested_state()
6087 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr; in vmx_get_nested_state()
6088 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr; in vmx_get_nested_state()
6093 if (vmx->nested.hv_evmcs) in vmx_get_nested_state()
6102 if (vmx->nested.smm.vmxon) in vmx_get_nested_state()
6103 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON; in vmx_get_nested_state()
6105 if (vmx->nested.smm.guest_mode) in vmx_get_nested_state()
6106 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE; in vmx_get_nested_state()
6111 if (vmx->nested.nested_run_pending) in vmx_get_nested_state()
6114 if (vmx->nested.mtf_pending) in vmx_get_nested_state()
6118 vmx->nested.has_preemption_timer_deadline) { in vmx_get_nested_state()
6119 kvm_state.hdr.vmx.flags |= in vmx_get_nested_state()
6121 kvm_state.hdr.vmx.preemption_timer_deadline = in vmx_get_nested_state()
6122 vmx->nested.preemption_timer_deadline; in vmx_get_nested_state()
6148 if (!vmx->nested.need_vmcs12_to_shadow_sync) { in vmx_get_nested_state()
6149 if (vmx->nested.hv_evmcs) in vmx_get_nested_state()
6150 copy_enlightened_to_vmcs12(vmx); in vmx_get_nested_state()
6152 copy_shadow_to_vmcs12(vmx); in vmx_get_nested_state()
6192 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_nested_state() local
6196 &user_kvm_nested_state->data.vmx[0]; in vmx_set_nested_state()
6202 if (kvm_state->hdr.vmx.vmxon_pa == -1ull) { in vmx_set_nested_state()
6203 if (kvm_state->hdr.vmx.smm.flags) in vmx_set_nested_state()
6206 if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) in vmx_set_nested_state()
6224 if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa)) in vmx_set_nested_state()
6228 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && in vmx_set_nested_state()
6232 if (kvm_state->hdr.vmx.smm.flags & in vmx_set_nested_state()
6236 if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) in vmx_set_nested_state()
6247 : kvm_state->hdr.vmx.smm.flags) in vmx_set_nested_state()
6250 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && in vmx_set_nested_state()
6251 !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) in vmx_set_nested_state()
6255 (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled)) in vmx_set_nested_state()
6260 if (kvm_state->hdr.vmx.vmxon_pa == -1ull) in vmx_set_nested_state()
6263 vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa; in vmx_set_nested_state()
6273 (kvm_state->hdr.vmx.vmcs12_pa != -1ull)) in vmx_set_nested_state()
6279 if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) { in vmx_set_nested_state()
6280 if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa || in vmx_set_nested_state()
6281 !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa)) in vmx_set_nested_state()
6284 set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa); in vmx_set_nested_state()
6297 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) { in vmx_set_nested_state()
6298 vmx->nested.smm.vmxon = true; in vmx_set_nested_state()
6299 vmx->nested.vmxon = false; in vmx_set_nested_state()
6301 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) in vmx_set_nested_state()
6302 vmx->nested.smm.guest_mode = true; in vmx_set_nested_state()
6315 vmx->nested.nested_run_pending = in vmx_set_nested_state()
6318 vmx->nested.mtf_pending = in vmx_set_nested_state()
6343 vmx->nested.has_preemption_timer_deadline = false; in vmx_set_nested_state()
6344 if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) { in vmx_set_nested_state()
6345 vmx->nested.has_preemption_timer_deadline = true; in vmx_set_nested_state()
6346 vmx->nested.preemption_timer_deadline = in vmx_set_nested_state()
6347 kvm_state->hdr.vmx.preemption_timer_deadline; in vmx_set_nested_state()
6355 vmx->nested.dirty_vmcs12 = true; in vmx_set_nested_state()
6363 vmx->nested.nested_run_pending = 0; in vmx_set_nested_state()
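
Nearly every hit above opens with `struct vcpu_vmx *vmx = to_vmx(vcpu);`. In the kernel, to_vmx() is a container_of() accessor that recovers the VMX-specific wrapper from the embedded generic `struct kvm_vcpu`, so each of the listed functions can reach `vmx->nested.*` state starting from the vcpu pointer that common KVM code passes in. The following is a minimal, self-contained sketch of that pattern; the stub struct names and fields (kvm_vcpu_stub, vcpu_vmx_stub, nested_vmxon) are hypothetical stand-ins for this illustration, not the kernel's definitions.

```c
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structs; field layout is illustrative only. */
struct kvm_vcpu_stub {
	int vcpu_id;
};

struct vcpu_vmx_stub {
	struct kvm_vcpu_stub vcpu;   /* generic vCPU embedded inside the VMX wrapper */
	unsigned long nested_vmxon;  /* stand-in for the vmx->nested.* state seen above */
};

/* container_of(): recover the enclosing struct from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Mirrors the role of the kernel's to_vmx(): map a generic vcpu back to its wrapper. */
static struct vcpu_vmx_stub *to_vmx_stub(struct kvm_vcpu_stub *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx_stub, vcpu);
}

int main(void)
{
	struct vcpu_vmx_stub vmx = { .vcpu = { .vcpu_id = 7 }, .nested_vmxon = 1 };
	struct kvm_vcpu_stub *vcpu = &vmx.vcpu;          /* what generic KVM code holds */
	struct vcpu_vmx_stub *back = to_vmx_stub(vcpu);  /* what the listed functions do first */

	printf("vcpu_id=%d nested_vmxon=%lu same_object=%d\n",
	       back->vcpu.vcpu_id, back->nested_vmxon, back == &vmx);
	return 0;
}
```

Because container_of() subtracts the member's offset, the pattern works wherever the embedded `struct kvm_vcpu` sits inside the wrapper, which is why the functions listed above can all start from to_vmx(vcpu) without caring about the surrounding layout.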