Lines Matching refs:kvm_x86_ops
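Every hit below dispatches through a single global table of vendor callbacks: line 114 defines the kvm_x86_ops instance, line 115 exports it, and line 10465 fills it at hardware setup by copying in the vendor module's (VMX or SVM) runtime ops wholesale. What follows is a minimal sketch of that pattern, assuming simplified types; the member and function names are illustrative stand-ins, not the kernel's actual kvm_x86_ops layout.

/*
 * Minimal sketch of the dispatch pattern; types and names here are
 * illustrative, not the kernel's real kvm_x86_ops layout.
 */
#include <string.h>

struct vcpu;                                  /* opaque for the sketch */

struct demo_x86_ops {
	int  (*get_cpl)(struct vcpu *vcpu);   /* mandatory hook */
	void (*vcpu_load)(struct vcpu *vcpu, int cpu);
};

static struct demo_x86_ops demo_x86_ops;      /* the one global instance */

/* A vendor module supplies its callback table... */
static int  vmx_get_cpl(struct vcpu *vcpu)            { (void)vcpu; return 0; }
static void vmx_vcpu_load(struct vcpu *vcpu, int cpu) { (void)vcpu; (void)cpu; }

static const struct demo_x86_ops vmx_runtime_ops = {
	.get_cpl   = vmx_get_cpl,
	.vcpu_load = vmx_vcpu_load,
};

/* ...and hardware setup copies it into the global, as at line 10465. */
static void demo_hardware_setup(const struct demo_x86_ops *runtime_ops)
{
	memcpy(&demo_x86_ops, runtime_ops, sizeof(demo_x86_ops));
}

/* Every later call site then dispatches through a member, as at line 714. */
static int demo_require_cpl(struct vcpu *vcpu, int required_cpl)
{
	return demo_x86_ops.get_cpl(vcpu) <= required_cpl;
}

The one-time copy lets the common x86 code reach vendor hooks as direct struct members (kvm_x86_ops.foo) rather than through an extra pointer dereference per call, which is the access form seen in every hit below.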

114 struct kvm_x86_ops kvm_x86_ops __read_mostly;
115 EXPORT_SYMBOL_GPL(kvm_x86_ops);
714 if (kvm_x86_ops.get_cpl(vcpu) <= required_cpl) in kvm_require_cpl()
856 kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l); in kvm_set_cr0()
869 kvm_x86_ops.set_cr0(vcpu, cr0); in kvm_set_cr0()
979 if (kvm_x86_ops.get_cpl(vcpu) != 0 || in kvm_set_xcr()
996 if (!kvm_x86_ops.is_valid_cr4(vcpu, cr4)) in kvm_valid_cr4()
1033 kvm_x86_ops.set_cr4(vcpu, cr4); in kvm_set_cr4()
1121 kvm_x86_ops.set_dr7(vcpu, dr7); in kvm_update_dr7()
1460 return kvm_x86_ops.get_msr_feature(msr); in kvm_get_msr_feature()
1536 r = kvm_x86_ops.set_efer(vcpu, efer); in set_efer()
1642 return kvm_x86_ops.set_msr(vcpu, &msr); in __kvm_set_msr()
1675 ret = kvm_x86_ops.get_msr(vcpu, &msr); in __kvm_get_msr()
2263 vcpu->arch.tsc_offset = kvm_x86_ops.write_l1_tsc_offset(vcpu, offset); in kvm_vcpu_write_tsc_offset()
3012 kvm_x86_ops.tlb_flush_all(vcpu); in kvm_vcpu_flush_tlb_all()
3018 kvm_x86_ops.tlb_flush_guest(vcpu); in kvm_vcpu_flush_tlb_guest()
3842 r = kvm_x86_ops.has_emulated_msr(MSR_IA32_SMBASE); in kvm_vm_ioctl_check_extension()
3845 r = !kvm_x86_ops.cpu_has_accelerated_tpr(); in kvm_vm_ioctl_check_extension()
3872 r = kvm_x86_ops.nested_ops->get_state ? in kvm_vm_ioctl_check_extension()
3873 kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0; in kvm_vm_ioctl_check_extension()
3876 r = kvm_x86_ops.enable_direct_tlbflush != NULL; in kvm_vm_ioctl_check_extension()
3879 r = kvm_x86_ops.nested_ops->enable_evmcs != NULL; in kvm_vm_ioctl_check_extension()
4001 if (kvm_x86_ops.has_wbinvd_exit()) in kvm_arch_vcpu_load()
4008 kvm_x86_ops.vcpu_load(vcpu, cpu); in kvm_arch_vcpu_load()
4078 vcpu->arch.preempted_in_kernel = !kvm_x86_ops.get_cpl(vcpu); in kvm_arch_vcpu_put()
4097 kvm_x86_ops.vcpu_put(vcpu); in kvm_arch_vcpu_put()
4111 kvm_x86_ops.sync_pir_to_irr(vcpu); in kvm_vcpu_ioctl_get_lapic()
4230 kvm_x86_ops.setup_mce(vcpu); in kvm_vcpu_ioctl_x86_setup_mce()
4337 events->interrupt.shadow = kvm_x86_ops.get_interrupt_shadow(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4341 events->nmi.masked = kvm_x86_ops.get_nmi_mask(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4408 kvm_x86_ops.set_interrupt_shadow(vcpu, in kvm_vcpu_ioctl_x86_set_vcpu_events()
4414 kvm_x86_ops.set_nmi_mask(vcpu, events->nmi.masked); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4427 kvm_x86_ops.nested_ops->leave_nested(vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4689 if (!kvm_x86_ops.nested_ops->enable_evmcs) in kvm_vcpu_ioctl_enable_cap()
4691 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version); in kvm_vcpu_ioctl_enable_cap()
4700 if (!kvm_x86_ops.enable_direct_tlbflush) in kvm_vcpu_ioctl_enable_cap()
4703 return kvm_x86_ops.enable_direct_tlbflush(vcpu); in kvm_vcpu_ioctl_enable_cap()
5014 if (!kvm_x86_ops.nested_ops->get_state) in kvm_arch_vcpu_ioctl()
5022 r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state, in kvm_arch_vcpu_ioctl()
5044 if (!kvm_x86_ops.nested_ops->set_state) in kvm_arch_vcpu_ioctl()
5067 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state); in kvm_arch_vcpu_ioctl()
5111 ret = kvm_x86_ops.set_tss_addr(kvm, addr); in kvm_vm_ioctl_set_tss_addr()
5118 return kvm_x86_ops.set_identity_map_addr(kvm, ident_addr); in kvm_vm_ioctl_set_identity_map_addr()
5275 if (kvm_x86_ops.flush_log_dirty) in kvm_arch_sync_dirty_log()
5276 kvm_x86_ops.flush_log_dirty(kvm); in kvm_arch_sync_dirty_log()
5818 if (kvm_x86_ops.mem_enc_op) in kvm_arch_vm_ioctl()
5819 r = kvm_x86_ops.mem_enc_op(kvm, argp); in kvm_arch_vm_ioctl()
5830 if (kvm_x86_ops.mem_enc_reg_region) in kvm_arch_vm_ioctl()
5831 r = kvm_x86_ops.mem_enc_reg_region(kvm, &region); in kvm_arch_vm_ioctl()
5842 if (kvm_x86_ops.mem_enc_unreg_region) in kvm_arch_vm_ioctl()
5843 r = kvm_x86_ops.mem_enc_unreg_region(kvm, &region); in kvm_arch_vm_ioctl()
5952 if (!kvm_x86_ops.has_emulated_msr(emulated_msrs_all[i])) in kvm_init_msr_list()
6015 kvm_x86_ops.set_segment(vcpu, var, seg); in kvm_set_segment()
6021 kvm_x86_ops.get_segment(vcpu, var, seg); in kvm_get_segment()
6041 u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_read()
6048 u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_fetch()
6056 u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_write()
6105 u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_fetch_guest_virt()
6130 u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_read_guest_virt()
6151 if (!system && kvm_x86_ops.get_cpl(vcpu) == 3) in emulator_read_std()
6204 if (!system && kvm_x86_ops.get_cpl(vcpu) == 3) in emulator_write_std()
6229 if (unlikely(!kvm_x86_ops.can_emulate_instruction(vcpu, NULL, 0))) in handle_ud()
6263 u32 access = ((kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0) in vcpu_mmio_gva_to_gpa()
6671 return kvm_x86_ops.get_segment_base(vcpu, seg); in get_segment_base()
6684 if (kvm_x86_ops.has_wbinvd_exit()) { in kvm_emulate_wbinvd_noskip()
6789 return kvm_x86_ops.get_cpl(emul_to_vcpu(ctxt)); in emulator_get_cpl()
6794 kvm_x86_ops.get_gdt(emul_to_vcpu(ctxt), dt); in emulator_get_gdt()
6799 kvm_x86_ops.get_idt(emul_to_vcpu(ctxt), dt); in emulator_get_idt()
6804 kvm_x86_ops.set_gdt(emul_to_vcpu(ctxt), dt); in emulator_set_gdt()
6809 kvm_x86_ops.set_idt(emul_to_vcpu(ctxt), dt); in emulator_set_idt()
6951 return kvm_x86_ops.check_intercept(emul_to_vcpu(ctxt), info, stage, in emulator_intercept()
6994 kvm_x86_ops.set_nmi_mask(emul_to_vcpu(ctxt), masked); in emulator_set_nmi_mask()
7013 return kvm_x86_ops.pre_leave_smm(emul_to_vcpu(ctxt), smstate); in emulator_pre_leave_smm()
7076 u32 int_shadow = kvm_x86_ops.get_interrupt_shadow(vcpu); in toggle_interruptibility()
7087 kvm_x86_ops.set_interrupt_shadow(vcpu, mask); in toggle_interruptibility()
7129 kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l); in init_emulate_ctxt()
7195 if (!is_guest_mode(vcpu) && kvm_x86_ops.get_cpl(vcpu) == 0) { in handle_emulation_failure()
7376 unsigned long rflags = kvm_x86_ops.get_rflags(vcpu); in kvm_skip_emulated_instruction()
7379 r = kvm_x86_ops.skip_emulated_instruction(vcpu); in kvm_skip_emulated_instruction()
7502 if (unlikely(!kvm_x86_ops.can_emulate_instruction(vcpu, insn, insn_len))) in x86_emulate_instruction()
7634 unsigned long rflags = kvm_x86_ops.get_rflags(vcpu); in x86_emulate_instruction()
7648 if (kvm_x86_ops.update_emulated_instruction) in x86_emulate_instruction()
7649 kvm_x86_ops.update_emulated_instruction(vcpu); in x86_emulate_instruction()
7978 user_mode = kvm_x86_ops.get_cpl(__this_cpu_read(current_vcpu)); in kvm_is_user_mode()
8071 if (kvm_x86_ops.hardware_enable) { in kvm_arch_init()
8179 kvm_x86_ops.hardware_enable = NULL; in kvm_arch_exit()
8320 if (kvm_x86_ops.get_cpl(vcpu) != 0) { in kvm_emulate_hypercall()
8377 kvm_x86_ops.patch_hypercall(vcpu, instruction); in emulator_fix_hypercall()
8406 if (!kvm_x86_ops.update_cr8_intercept) in update_cr8_intercept()
8425 kvm_x86_ops.update_cr8_intercept(vcpu, tpr, max_irr); in update_cr8_intercept()
8437 kvm_x86_ops.queue_exception(vcpu); in kvm_inject_exception()
8467 kvm_x86_ops.set_nmi(vcpu); in inject_pending_event()
8470 kvm_x86_ops.set_irq(vcpu); in inject_pending_event()
8485 r = kvm_x86_ops.nested_ops->check_events(vcpu); in inject_pending_event()
8534 r = can_inject ? kvm_x86_ops.smi_allowed(vcpu, true) : -EBUSY; in inject_pending_event()
8543 kvm_x86_ops.enable_smi_window(vcpu); in inject_pending_event()
8547 r = can_inject ? kvm_x86_ops.nmi_allowed(vcpu, true) : -EBUSY; in inject_pending_event()
8553 kvm_x86_ops.set_nmi(vcpu); in inject_pending_event()
8555 WARN_ON(kvm_x86_ops.nmi_allowed(vcpu, true) < 0); in inject_pending_event()
8558 kvm_x86_ops.enable_nmi_window(vcpu); in inject_pending_event()
8562 r = can_inject ? kvm_x86_ops.interrupt_allowed(vcpu, true) : -EBUSY; in inject_pending_event()
8567 kvm_x86_ops.set_irq(vcpu); in inject_pending_event()
8568 WARN_ON(kvm_x86_ops.interrupt_allowed(vcpu, true) < 0); in inject_pending_event()
8571 kvm_x86_ops.enable_irq_window(vcpu); in inject_pending_event()
8575 kvm_x86_ops.nested_ops->hv_timer_pending && in inject_pending_event()
8576 kvm_x86_ops.nested_ops->hv_timer_pending(vcpu)) in inject_pending_event()
8596 if (kvm_x86_ops.get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) in process_nmi()
8686 kvm_x86_ops.get_gdt(vcpu, &dt); in enter_smm_save_state_32()
8690 kvm_x86_ops.get_idt(vcpu, &dt); in enter_smm_save_state_32()
8740 kvm_x86_ops.get_idt(vcpu, &dt); in enter_smm_save_state_64()
8750 kvm_x86_ops.get_gdt(vcpu, &dt); in enter_smm_save_state_64()
8780 kvm_x86_ops.pre_enter_smm(vcpu, buf); in enter_smm()
8785 if (kvm_x86_ops.get_nmi_mask(vcpu)) in enter_smm()
8788 kvm_x86_ops.set_nmi_mask(vcpu, true); in enter_smm()
8794 kvm_x86_ops.set_cr0(vcpu, cr0); in enter_smm()
8797 kvm_x86_ops.set_cr4(vcpu, 0); in enter_smm()
8801 kvm_x86_ops.set_idt(vcpu, &dt); in enter_smm()
8832 kvm_x86_ops.set_efer(vcpu, 0); in enter_smm()
8870 kvm_x86_ops.refresh_apicv_exec_ctrl(vcpu); in kvm_vcpu_update_apicv()
8886 if (!kvm_x86_ops.check_apicv_inhibit_reasons || in kvm_request_apicv_update()
8887 !kvm_x86_ops.check_apicv_inhibit_reasons(bit)) in kvm_request_apicv_update()
8906 if (kvm_x86_ops.pre_update_apicv_exec_ctrl) in kvm_request_apicv_update()
8907 kvm_x86_ops.pre_update_apicv_exec_ctrl(kvm, activate); in kvm_request_apicv_update()
8933 kvm_x86_ops.sync_pir_to_irr(vcpu); in vcpu_scan_ioapic()
8953 kvm_x86_ops.load_eoi_exitmap(vcpu, eoi_exit_bitmap); in vcpu_load_eoi_exitmap()
8972 if (kvm_x86_ops.guest_memory_reclaimed) in kvm_arch_guest_memory_reclaimed()
8973 kvm_x86_ops.guest_memory_reclaimed(kvm); in kvm_arch_guest_memory_reclaimed()
8981 if (!kvm_x86_ops.set_apic_access_page_addr) in kvm_vcpu_reload_apic_access_page()
8984 kvm_x86_ops.set_apic_access_page_addr(vcpu); in kvm_vcpu_reload_apic_access_page()
9010 if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) { in vcpu_enter_guest()
9118 kvm_x86_ops.msr_filter_changed(vcpu); in vcpu_enter_guest()
9131 kvm_x86_ops.enable_irq_window(vcpu); in vcpu_enter_guest()
9146 kvm_x86_ops.prepare_guest_switch(vcpu); in vcpu_enter_guest()
9177 kvm_x86_ops.sync_pir_to_irr(vcpu); in vcpu_enter_guest()
9191 kvm_x86_ops.request_immediate_exit(vcpu); in vcpu_enter_guest()
9212 exit_fastpath = kvm_x86_ops.run(vcpu); in vcpu_enter_guest()
9222 kvm_x86_ops.sync_dirty_debug_regs(vcpu); in vcpu_enter_guest()
9244 kvm_x86_ops.handle_exit_irqoff(vcpu); in vcpu_enter_guest()
9295 r = kvm_x86_ops.handle_exit(vcpu, exit_fastpath); in vcpu_enter_guest()
9301 kvm_x86_ops.cancel_injection(vcpu); in vcpu_enter_guest()
9311 (!kvm_x86_ops.pre_block || kvm_x86_ops.pre_block(vcpu) == 0)) { in vcpu_block()
9316 if (kvm_x86_ops.post_block) in vcpu_block()
9317 kvm_x86_ops.post_block(vcpu); in vcpu_block()
9344 kvm_x86_ops.nested_ops->check_events(vcpu); in kvm_vcpu_running()
9698 kvm_x86_ops.get_idt(vcpu, &dt); in __get_sregs()
9701 kvm_x86_ops.get_gdt(vcpu, &dt); in __get_sregs()
9850 kvm_x86_ops.set_idt(vcpu, &dt); in __set_sregs()
9853 kvm_x86_ops.set_gdt(vcpu, &dt); in __set_sregs()
9863 kvm_x86_ops.set_efer(vcpu, sregs->efer); in __set_sregs()
9866 kvm_x86_ops.set_cr0(vcpu, sregs->cr0); in __set_sregs()
9872 kvm_x86_ops.set_cr4(vcpu, sregs->cr4); in __set_sregs()
9978 kvm_x86_ops.update_exception_bitmap(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
10188 r = kvm_x86_ops.vcpu_create(vcpu); in kvm_arch_vcpu_create()
10251 kvm_x86_ops.vcpu_free(vcpu); in kvm_arch_vcpu_destroy()
10340 kvm_x86_ops.vcpu_reset(vcpu, init_event); in kvm_vcpu_reset()
10365 ret = kvm_x86_ops.hardware_enable(); in kvm_arch_hardware_enable()
10447 kvm_x86_ops.hardware_disable(); in kvm_arch_hardware_disable()
10465 memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops)); in kvm_arch_hardware_setup()
10494 kvm_x86_ops.hardware_unsetup(); in kvm_arch_hardware_unsetup()
10534 kvm_x86_ops.sched_in(vcpu, cpu); in kvm_arch_sched_in()
10583 return kvm_x86_ops.vm_init(kvm); in kvm_arch_init_vm()
10701 if (kvm_x86_ops.vm_destroy) in kvm_arch_destroy_vm()
10702 kvm_x86_ops.vm_destroy(kvm); in kvm_arch_destroy_vm()
10891 if (kvm_x86_ops.slot_enable_log_dirty) { in kvm_mmu_slot_apply_flags()
10892 kvm_x86_ops.slot_enable_log_dirty(kvm, new); in kvm_mmu_slot_apply_flags()
10909 if (kvm_x86_ops.slot_disable_log_dirty) in kvm_mmu_slot_apply_flags()
10910 kvm_x86_ops.slot_disable_log_dirty(kvm, new); in kvm_mmu_slot_apply_flags()
10948 kvm_x86_ops.guest_apic_has_interrupt && in kvm_guest_apic_has_interrupt()
10949 kvm_x86_ops.guest_apic_has_interrupt(vcpu)); in kvm_guest_apic_has_interrupt()
10968 kvm_x86_ops.nmi_allowed(vcpu, false))) in kvm_vcpu_has_events()
10973 kvm_x86_ops.smi_allowed(vcpu, false))) in kvm_vcpu_has_events()
10985 kvm_x86_ops.nested_ops->hv_timer_pending && in kvm_vcpu_has_events()
10986 kvm_x86_ops.nested_ops->hv_timer_pending(vcpu)) in kvm_vcpu_has_events()
11007 if (vcpu->arch.apicv_active && kvm_x86_ops.dy_apicv_has_pending_interrupt(vcpu)) in kvm_arch_dy_runnable()
11025 return kvm_x86_ops.interrupt_allowed(vcpu, false); in kvm_arch_interrupt_allowed()
11047 rflags = kvm_x86_ops.get_rflags(vcpu); in kvm_get_rflags()
11059 kvm_x86_ops.set_rflags(vcpu, rflags); in __kvm_set_rflags()
11189 (vcpu->arch.apf.send_user_only && kvm_x86_ops.get_cpl(vcpu) == 0)) in kvm_can_deliver_async_pf()
11334 ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, in kvm_arch_irq_bypass_add_producer()
11359 ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0); in kvm_arch_irq_bypass_del_producer()
11370 return kvm_x86_ops.update_pi_irte(kvm, host_irq, guest_irq, set); in kvm_arch_update_irqfd_routing()
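
Two guard conventions recur throughout the listing. Optional hooks that a vendor may leave NULL are pointer-checked before the call (see lines 5275-5276, 5818-5819, and 10701-10702), while the nested-virtualization callbacks sit behind the separate kvm_x86_ops.nested_ops sub-table and are likewise tested before use (see lines 3872-3873 and 10985-10986); mandatory hooks such as get_cpl or run are invoked unconditionally. A minimal sketch of both guards, again with illustrative names rather than the kernel's:

struct demo_nested_ops {
	int (*get_state)(void *vcpu, void *user_state, unsigned int size);
};

struct demo_ops {
	void (*vm_destroy)(void *kvm);             /* optional: may stay NULL */
	const struct demo_nested_ops *nested_ops;  /* sub-table; assumed set at setup */
};

static struct demo_ops demo_ops;

static void demo_destroy_vm(void *kvm)
{
	/* optional hook: test the pointer first, as at lines 10701-10702 */
	if (demo_ops.vm_destroy)
		demo_ops.vm_destroy(kvm);
}

static int demo_nested_state_size(void)
{
	/* sub-table member guarded the same way, as at lines 3872-3873 */
	return demo_ops.nested_ops->get_state ?
	       demo_ops.nested_ops->get_state(NULL, NULL, 0) : 0;
}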