Lines matching refs: to_vmx
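
For reference, to_vmx() is the downcast from the arch-generic struct kvm_vcpu to the VMX-specific struct vcpu_vmx that embeds it. A minimal sketch of the helper, following its definition in arch/x86/kvm/vmx/vmx.h (struct layout elided):

	static __always_inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
	{
		/* struct vcpu_vmx embeds its struct kvm_vcpu as the member
		 * named "vcpu", so container_of() recovers the outer struct
		 * from a pointer to the inner one. */
		return container_of(vcpu, struct vcpu_vmx, vcpu);
	}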

544 tmp_eptp = to_vmx(vcpu)->ept_pointer; in check_ept_pointer_match()
545 } else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) { in check_ept_pointer_match()
567 u64 ept_pointer = to_vmx(vcpu)->ept_pointer; in __hv_remote_flush_tlb_with_range()
595 if (VALID_PAGE(to_vmx(vcpu)->ept_pointer)) in hv_remote_flush_tlb_with_range()
627 evmcs = (struct hv_enlightened_vmcs *)to_vmx(vcpu)->loaded_vmcs->vmcs; in hv_enable_direct_tlbflush()
888 if (to_vmx(vcpu)->rmode.vm86_active) in update_exception_bitmap()
1275 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_prepare_switch_to_guest()
1406 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_load_vmcs()
1477 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_load()
1490 vmx_prepare_switch_to_host(to_vmx(vcpu)); in vmx_vcpu_put()
1500 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_rflags()
1518 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_rflags()
1571 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_rtit_ctl_check()
1659 to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) { in skip_emulated_instruction()
1690 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_update_emulated_instruction()
1731 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_queue_exception()
1859 uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits; in vmx_feature_control_msr_valid()
1886 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_msr()
1920 msr_info->data = to_vmx(vcpu)->spec_ctrl; in vmx_get_msr()
2047 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_msr()
2200 !(to_vmx(vcpu)->msr_ia32_feature_control & in vmx_set_msr()
2208 (to_vmx(vcpu)->msr_ia32_feature_control & in vmx_set_msr()
2811 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_pmode()
2882 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_rmode()
2930 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_efer()
2939 vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); in vmx_set_efer()
2942 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); in vmx_set_efer()
2956 vmx_segment_cache_clear(to_vmx(vcpu)); in enter_lmode()
2971 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); in exit_lmode()
2979 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_flush_tlb_all()
3004 return to_vmx(vcpu)->vpid; in vmx_get_current_vpid()
3078 struct vcpu_vmx *vmx = to_vmx(vcpu); in ept_update_paging_mode_cr0()
3102 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_cr0()
3175 to_vmx(vcpu)->ept_pointer = eptp; in vmx_load_mmu_pgd()
3206 if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4)) in vmx_is_valid_cr4()
3214 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_cr4()
3274 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_segment()
3312 if (to_vmx(vcpu)->rmode.vm86_active) { in vmx_get_segment_base()
3316 return vmx_read_guest_seg_base(to_vmx(vcpu), seg); in vmx_get_segment_base()
3321 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_cpl()
3353 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_segment()
3393 u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS); in vmx_get_cs_db_l_bits()
3801 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_disable_intercept_for_msr()
3847 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_enable_intercept_for_msr()
3892 (secondary_exec_controls_get(to_vmx(vcpu)) & in vmx_msr_bitmap_mode()
3904 unsigned long *msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap; in vmx_reset_x2apic_msrs()
3942 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_update_msr_bitmap()
3957 struct vcpu_vmx *vmx = to_vmx(vcpu); in pt_update_intercept_for_msr()
3973 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_guest_apic_has_interrupt()
3993 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_msr_filter_changed()
4056 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_deliver_nested_posted_interrupt()
4082 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_deliver_posted_interrupt()
4200 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_refresh_apicv_exec_ctrl()
4484 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_reset()
4581 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING); in enable_irq_window()
4592 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING); in enable_nmi_window()
4597 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_inject_irq()
4625 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_inject_nmi()
4656 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_nmi_mask()
4670 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_nmi_mask()
4693 if (!enable_vnmi && to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked) in vmx_nmi_blocked()
4703 if (to_vmx(vcpu)->nested.nested_run_pending) in vmx_nmi_allowed()
4725 if (to_vmx(vcpu)->nested.nested_run_pending) in vmx_interrupt_allowed()
4770 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = in rmode_exception()
4865 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_exception_nmi()
5081 if (to_vmx(vcpu)->nested.vmxon && in handle_set_cr0()
5228 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING); in handle_dr()
5263 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING); in vmx_sync_dirty_debug_regs()
5279 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING); in handle_interrupt_window()
5373 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_task_switch()
5442 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && in handle_ept_violation()
5505 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING); in handle_nmi_window()
5514 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invalid_guest_state()
5559 struct vcpu_vmx *vmx = to_vmx(vcpu); in grow_ple_window()
5575 struct vcpu_vmx *vmx = to_vmx(vcpu); in shrink_ple_window()
5694 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && in handle_pml_full()
5709 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_fastpath_preemption_timer()
5811 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_exit_info()
5838 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_flush_pml_buffer()
6054 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_handle_exit()
6286 to_vmx(vcpu)->nested.l1_tpr_threshold = tpr_threshold; in update_cr8_intercept()
6293 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_virtual_apic_mode()
6350 to_vmx(vcpu)->nested.reload_vmcs01_apic_access_page = true; in vmx_set_apic_access_page_addr()
6354 if (!(secondary_exec_controls_get(to_vmx(vcpu)) & in vmx_set_apic_access_page_addr()
6422 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_sync_pir_to_irr()
6466 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_apicv_post_state_restore()
6513 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_handle_exit_irqoff()
6674 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_update_hv_timer()
6733 switch (to_vmx(vcpu)->exit_reason.basic) { in vmx_exit_handlers_fastpath()
6811 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_run()
6960 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_free_vcpu()
6975 vmx = to_vmx(vcpu); in vmx_create_vcpu()
7239 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_cr_fixed1_bits_update()
7279 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_entry_exit_ctls_update()
7296 struct vcpu_vmx *vmx = to_vmx(vcpu); in update_intel_pt_cfg()
7365 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_after_set_cpuid()
7376 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= in vmx_vcpu_after_set_cpuid()
7380 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= in vmx_vcpu_after_set_cpuid()
7444 to_vmx(vcpu)->req_immediate_exit = true; in vmx_request_immediate_exit()
7556 vmx = to_vmx(vcpu); in vmx_set_hv_timer()
7591 to_vmx(vcpu)->hv_deadline_tsc = -1; in vmx_cancel_hv_timer()
7649 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= in vmx_setup_mce()
7652 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= in vmx_setup_mce()
7659 if (to_vmx(vcpu)->nested.nested_run_pending) in vmx_smi_allowed()
7666 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_pre_enter_smm()
7680 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_pre_leave_smm()
7705 return to_vmx(vcpu)->nested.vmxon; in vmx_apic_init_signal_blocked()
7711 struct hrtimer *timer = &to_vmx(vcpu)->nested.preemption_timer; in vmx_migrate_timers()
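
The dominant pattern in the matches above is to perform the downcast once at the top of a handler and then work on the cached pointer. A hypothetical example in the same style (vmx_example_nested_run_pending is illustrative only; nested.nested_run_pending is the field tested at lines 4703, 4725 and 7659 above):

	static bool vmx_example_nested_run_pending(struct kvm_vcpu *vcpu)
	{
		struct vcpu_vmx *vmx = to_vmx(vcpu);

		/* Same field consulted by vmx_nmi_allowed(),
		 * vmx_interrupt_allowed() and vmx_smi_allowed() above. */
		return vmx->nested.nested_run_pending;
	}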