Lines matching +full:ecx +full:- +full:1000 (search hits from arch/x86/kvm/x86.c; the leading numbers are the file's own line numbers, and "in func()" names the enclosing function)
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Kernel-based Virtual Machine driver for Linux
16 * Ben-Ami Yassour <benami@il.ibm.com>
43 #include <linux/intel-iommu.h>
45 #include <linux/user-return-notifier.h>
59 #include <linux/entry-kvm.h>
88 ((struct kvm_vcpu *)(ctxt)->vcpu)
91 * - enable syscall by default because it's emulated by KVM
92 * - enable LME and LMA by default on 64-bit KVM
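For context, a minimal sketch of the reserved-bit mask these two comment lines document (hedged; illustrative of the idea, not necessarily the file's exact definition):

	/* treat everything except the emulated features as reserved */
	static u64 __read_mostly efer_reserved_bits =
		~((u64)(EFER_SCE | EFER_LME | EFER_LMA));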
140 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
145 * lapic timer advance (tscdeadline mode only) in nanoseconds. '-1' enables
146 * adaptive tuning starting from default advancement of 1000ns. '0' disables
147 * advancement entirely. Any other value is used as-is and disables adaptive
150 static int __read_mostly lapic_timer_advance_ns = -1;
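A minimal sketch of how a consumer can decode this tri-state parameter (illustrative names, not the file's actual code):

	int v = lapic_timer_advance_ns;
	bool adaptive = (v == -1);           /* auto-tune, starting from 1000ns */
	u32 advance = adaptive ? 1000 : v;   /* 0 disables, any other value is fixed */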
163 int __read_mostly pi_inject_timer = -1;
286 size - useroffset, NULL); in kvm_alloc_emulator_cache()
295 vcpu->arch.apf.gfns[i] = ~0; in kvm_async_pf_hash_reset()
311 if (msrs->registered) { in kvm_on_user_return()
312 msrs->registered = false; in kvm_on_user_return()
317 values = &msrs->values[slot]; in kvm_on_user_return()
318 if (values->host != values->curr) { in kvm_on_user_return()
319 wrmsrl(user_return_msrs_global.msrs[slot], values->host); in kvm_on_user_return()
320 values->curr = values->host; in kvm_on_user_return()
359 msrs->values[i].host = value; in kvm_user_return_msr_cpu_online()
360 msrs->values[i].curr = value; in kvm_user_return_msr_cpu_online()
370 value = (value & mask) | (msrs->values[slot].host & ~mask); in kvm_set_user_return_msr()
371 if (value == msrs->values[slot].curr) in kvm_set_user_return_msr()
377 msrs->values[slot].curr = value; in kvm_set_user_return_msr()
378 if (!msrs->registered) { in kvm_set_user_return_msr()
379 msrs->urn.on_user_return = kvm_on_user_return; in kvm_set_user_return_msr()
380 user_return_notifier_register(&msrs->urn); in kvm_set_user_return_msr()
381 msrs->registered = true; in kvm_set_user_return_msr()
392 if (msrs->registered) in drop_user_return_notifiers()
393 kvm_on_user_return(&msrs->urn); in drop_user_return_notifiers()
398 return vcpu->arch.apic_base; in kvm_get_apic_base()
411 enum lapic_mode new_mode = kvm_apic_mode(msr_info->data); in kvm_set_apic_base()
415 if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID) in kvm_set_apic_base()
417 if (!msr_info->host_initiated) { in kvm_set_apic_base()
424 kvm_lapic_set_base(vcpu, msr_info->data); in kvm_set_apic_base()
425 kvm_recalculate_apic_map(vcpu->kvm); in kvm_set_apic_base()
474 * #DBs can be trap-like or fault-like; the caller must check other CPU in exception_type()
492 unsigned nr = vcpu->arch.exception.nr; in kvm_deliver_exception_payload()
493 bool has_payload = vcpu->arch.exception.has_payload; in kvm_deliver_exception_payload()
494 unsigned long payload = vcpu->arch.exception.payload; in kvm_deliver_exception_payload()
502 * "Certain debug exceptions may clear bit 0-3. The in kvm_deliver_exception_payload()
506 vcpu->arch.dr6 &= ~DR_TRAP_BITS; in kvm_deliver_exception_payload()
510 vcpu->arch.dr6 |= DR6_RTM; in kvm_deliver_exception_payload()
511 vcpu->arch.dr6 |= payload; in kvm_deliver_exception_payload()
520 vcpu->arch.dr6 ^= payload & DR6_RTM; in kvm_deliver_exception_payload()
528 vcpu->arch.dr6 &= ~BIT(12); in kvm_deliver_exception_payload()
531 vcpu->arch.cr2 = payload; in kvm_deliver_exception_payload()
535 vcpu->arch.exception.has_payload = false; in kvm_deliver_exception_payload()
536 vcpu->arch.exception.payload = 0; in kvm_deliver_exception_payload()
549 if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) { in kvm_multiple_exception()
553 * On vmentry, vcpu->arch.exception.pending is only in kvm_multiple_exception()
560 WARN_ON_ONCE(vcpu->arch.exception.pending); in kvm_multiple_exception()
561 vcpu->arch.exception.injected = true; in kvm_multiple_exception()
571 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
572 vcpu->arch.exception.injected = false; in kvm_multiple_exception()
574 vcpu->arch.exception.has_error_code = has_error; in kvm_multiple_exception()
575 vcpu->arch.exception.nr = nr; in kvm_multiple_exception()
576 vcpu->arch.exception.error_code = error_code; in kvm_multiple_exception()
577 vcpu->arch.exception.has_payload = has_payload; in kvm_multiple_exception()
578 vcpu->arch.exception.payload = payload; in kvm_multiple_exception()
585 prev_nr = vcpu->arch.exception.nr; in kvm_multiple_exception()
587 /* triple fault -> shutdown */ in kvm_multiple_exception()
596 * Generate double fault per SDM Table 5-5. Set in kvm_multiple_exception()
600 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
601 vcpu->arch.exception.injected = false; in kvm_multiple_exception()
602 vcpu->arch.exception.has_error_code = true; in kvm_multiple_exception()
603 vcpu->arch.exception.nr = DF_VECTOR; in kvm_multiple_exception()
604 vcpu->arch.exception.error_code = 0; in kvm_multiple_exception()
605 vcpu->arch.exception.has_payload = false; in kvm_multiple_exception()
606 vcpu->arch.exception.payload = 0; in kvm_multiple_exception()
609 that instruction re-execution will regenerate lost in kvm_multiple_exception()
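The re-queue above implements SDM Table 5-5; a hedged sketch of the classification it relies on (the full source uses an exception_class() helper; the vector macros below are the real ones):

	/* contributory exceptions per SDM Table 5-5; #PF is its own class */
	static bool is_contributory(unsigned int nr)
	{
		return nr == DE_VECTOR || nr == TS_VECTOR || nr == NP_VECTOR ||
		       nr == SS_VECTOR || nr == GP_VECTOR;
	}

For example, a #GP raised while delivering a #NP is two contributory exceptions, so the code replaces them with #DF and error code 0, as the lines above show.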
653 ++vcpu->stat.pf_guest; in kvm_inject_page_fault()
654 vcpu->arch.exception.nested_apf = in kvm_inject_page_fault()
655 is_guest_mode(vcpu) && fault->async_page_fault; in kvm_inject_page_fault()
656 if (vcpu->arch.exception.nested_apf) { in kvm_inject_page_fault()
657 vcpu->arch.apf.nested_apf_token = fault->address; in kvm_inject_page_fault()
658 kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code); in kvm_inject_page_fault()
660 kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code, in kvm_inject_page_fault()
661 fault->address); in kvm_inject_page_fault()
670 WARN_ON_ONCE(fault->vector != PF_VECTOR); in kvm_inject_emulated_page_fault()
672 fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu : in kvm_inject_emulated_page_fault()
673 vcpu->arch.walk_mmu; in kvm_inject_emulated_page_fault()
679 if ((fault->error_code & PFERR_PRESENT_MASK) && in kvm_inject_emulated_page_fault()
680 !(fault->error_code & PFERR_RSVD_MASK)) in kvm_inject_emulated_page_fault()
681 kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address, in kvm_inject_emulated_page_fault()
682 fault_mmu->root_hpa); in kvm_inject_emulated_page_fault()
684 fault_mmu->inject_page_fault(vcpu, fault); in kvm_inject_emulated_page_fault()
685 return fault->nested_page_fault; in kvm_inject_emulated_page_fault()
691 atomic_inc(&vcpu->arch.nmi_queued); in kvm_inject_nmi()
745 real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception); in kvm_read_guest_page_mmu()
747 return -EFAULT; in kvm_read_guest_page_mmu()
758 return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn, in kvm_read_nested_guest_page()
774 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2; in load_pdptrs()
777 u64 pdpte[ARRAY_SIZE(mmu->pdptrs)]; in load_pdptrs()
795 memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); in load_pdptrs()
806 u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)]; in pdptrs_changed()
818 offset = (kvm_read_cr3(vcpu) & 0xffffffe0ul) & (PAGE_SIZE - 1); in pdptrs_changed()
824 return memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0; in pdptrs_changed()
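Note on the offset arithmetic above (my annotation): in PAE mode CR3[31:5] holds the 32-byte-aligned PDPT base, so ((cr3 & (PAGE_SIZE-1)) >> 5) << 2 in load_pdptrs() is that base's in-page offset in 8-byte units (bytes/32*4 == bytes/8), which the full source scales back to bytes when reading the four 8-byte PDPTEs; pdptrs_changed() computes the equivalent byte offset directly with the 0xffffffe0 mask.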
850 if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) && in kvm_set_cr0()
861 if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) && in kvm_set_cr0()
863 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu))) in kvm_set_cr0()
880 kvm_arch_has_noncoherent_dma(vcpu->kvm) && in kvm_set_cr0()
881 !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) in kvm_set_cr0()
882 kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL); in kvm_set_cr0()
898 if (vcpu->arch.xcr0 != host_xcr0) in kvm_load_guest_xsave_state()
899 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); in kvm_load_guest_xsave_state()
901 if (vcpu->arch.xsaves_enabled && in kvm_load_guest_xsave_state()
902 vcpu->arch.ia32_xss != host_xss) in kvm_load_guest_xsave_state()
903 wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss); in kvm_load_guest_xsave_state()
908 (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU)) && in kvm_load_guest_xsave_state()
909 vcpu->arch.pkru != vcpu->arch.host_pkru) in kvm_load_guest_xsave_state()
910 __write_pkru(vcpu->arch.pkru); in kvm_load_guest_xsave_state()
918 (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU))) { in kvm_load_host_xsave_state()
919 vcpu->arch.pkru = rdpkru(); in kvm_load_host_xsave_state()
920 if (vcpu->arch.pkru != vcpu->arch.host_pkru) in kvm_load_host_xsave_state()
921 __write_pkru(vcpu->arch.host_pkru); in kvm_load_host_xsave_state()
926 if (vcpu->arch.xcr0 != host_xcr0) in kvm_load_host_xsave_state()
929 if (vcpu->arch.xsaves_enabled && in kvm_load_host_xsave_state()
930 vcpu->arch.ia32_xss != host_xss) in kvm_load_host_xsave_state()
940 u64 old_xcr0 = vcpu->arch.xcr0; in __kvm_set_xcr()
956 valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP; in __kvm_set_xcr()
970 vcpu->arch.xcr0 = xcr0; in __kvm_set_xcr()
991 return -EINVAL; in kvm_valid_cr4()
993 if (cr4 & vcpu->arch.cr4_guest_rsvd_bits) in kvm_valid_cr4()
994 return -EINVAL; in kvm_valid_cr4()
997 return -EINVAL; in kvm_valid_cr4()
1020 && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, in kvm_set_cr4()
1067 (cr3 & vcpu->arch.cr3_lm_rsvd_bits)) in kvm_set_cr3()
1070 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) in kvm_set_cr3()
1074 vcpu->arch.cr3 = cr3; in kvm_set_cr3()
1088 vcpu->arch.cr8 = cr8; in kvm_set_cr8()
1098 return vcpu->arch.cr8; in kvm_get_cr8()
1106 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { in kvm_update_dr0123()
1108 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_update_dr0123()
1109 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD; in kvm_update_dr0123()
1117 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) in kvm_update_dr7()
1118 dr7 = vcpu->arch.guest_debug_dr7; in kvm_update_dr7()
1120 dr7 = vcpu->arch.dr7; in kvm_update_dr7()
1122 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
1124 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
1139 size_t size = ARRAY_SIZE(vcpu->arch.db); in __kvm_set_dr()
1143 vcpu->arch.db[array_index_nospec(dr, size)] = val; in __kvm_set_dr()
1144 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) in __kvm_set_dr()
1145 vcpu->arch.eff_db[dr] = val; in __kvm_set_dr()
1150 return -1; /* #GP */ in __kvm_set_dr()
1151 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); in __kvm_set_dr()
1156 return -1; /* #GP */ in __kvm_set_dr()
1157 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; in __kvm_set_dr()
1177 size_t size = ARRAY_SIZE(vcpu->arch.db); in kvm_get_dr()
1181 *val = vcpu->arch.db[array_index_nospec(dr, size)]; in kvm_get_dr()
1185 *val = vcpu->arch.dr6; in kvm_get_dr()
1189 *val = vcpu->arch.dr7; in kvm_get_dr()
1198 u32 ecx = kvm_rcx_read(vcpu); in kvm_rdpmc() local
1202 err = kvm_pmu_rdpmc(vcpu, ecx, &data); in kvm_rdpmc()
1219 * kvm-specific. Those are put in emulated_msrs_all; filtering of emulated_msrs
1342 * List of msr numbers which are used to expose MSR-based features that
1377 * 10 - MISC_PACKAGE_CTRLS
1378 * 11 - ENERGY_FILTERING_CTL
1379 * 12 - DOITM
1380 * 18 - FB_CLEAR_CTRL
1381 * 21 - XAPIC_DISABLE_STATUS
1382 * 23 - OVERCLOCKING_STATUS
1452 switch (msr->index) { in kvm_get_msr_feature()
1454 msr->data = kvm_get_arch_capabilities(); in kvm_get_msr_feature()
1457 rdmsrl_safe(msr->index, &msr->data); in kvm_get_msr_feature()
1517 u64 old_efer = vcpu->arch.efer; in set_efer()
1518 u64 efer = msr_info->data; in set_efer()
1524 if (!msr_info->host_initiated) { in set_efer()
1529 (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) in set_efer()
1534 efer |= vcpu->arch.efer & EFER_LMA; in set_efer()
1559 struct kvm *kvm = vcpu->kvm; in kvm_msr_allowed()
1568 idx = srcu_read_lock(&kvm->srcu); in kvm_msr_allowed()
1570 msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu); in kvm_msr_allowed()
1576 allowed = msr_filter->default_allow; in kvm_msr_allowed()
1577 ranges = msr_filter->ranges; in kvm_msr_allowed()
1579 for (i = 0; i < msr_filter->count; i++) { in kvm_msr_allowed()
1586 allowed = !!test_bit(index - start, bitmap); in kvm_msr_allowed()
1592 srcu_read_unlock(&kvm->srcu, idx); in kvm_msr_allowed()
1601 * Returns 0 on success, non-0 otherwise.
1625 * non-canonical address is written on Intel but not on in __kvm_set_msr()
1626 * AMD (which ignores the top 32-bits, because it does in __kvm_set_msr()
1627 * not implement 64-bit SYSENTER). in __kvm_set_msr()
1629 * 64-bit code should hence be able to write a non-canonical in __kvm_set_msr()
1631 * vmentry does not fail on Intel after writing a non-canonical in __kvm_set_msr()
1633 * invokes 64-bit SYSENTER. in __kvm_set_msr()
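To get the Intel behaviour the comment above describes, the value is canonicalized before the write; a self-contained sketch of that sign-extension (from memory the full source uses a get_canonical() helper; the name below is mine):

	/* sign-extend from bit (vaddr_bits - 1) so the address is canonical */
	static inline u64 canonical_address(u64 la, u8 vaddr_bits)
	{
		return ((s64)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
	}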
1660 * Returns 0 on success, non-0 otherwise.
1710 if (vcpu->run->msr.error) { in complete_emulated_msr()
1714 kvm_rax_write(vcpu, (u32)vcpu->run->msr.data); in complete_emulated_msr()
1715 kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32); in complete_emulated_msr()
1751 if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason)) in kvm_msr_user_space()
1754 vcpu->run->exit_reason = exit_reason; in kvm_msr_user_space()
1755 vcpu->run->msr.error = 0; in kvm_msr_user_space()
1756 memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad)); in kvm_msr_user_space()
1757 vcpu->run->msr.reason = msr_reason; in kvm_msr_user_space()
1758 vcpu->run->msr.index = index; in kvm_msr_user_space()
1759 vcpu->run->msr.data = data; in kvm_msr_user_space()
1760 vcpu->arch.complete_userspace_io = completion; in kvm_msr_user_space()
1779 u32 ecx = kvm_rcx_read(vcpu); in kvm_emulate_rdmsr() local
1783 r = kvm_get_msr(vcpu, ecx, &data); in kvm_emulate_rdmsr()
1786 if (r && kvm_get_msr_user_space(vcpu, ecx, r)) { in kvm_emulate_rdmsr()
1793 trace_kvm_msr_read_ex(ecx); in kvm_emulate_rdmsr()
1798 trace_kvm_msr_read(ecx, data); in kvm_emulate_rdmsr()
1800 kvm_rax_write(vcpu, data & -1u); in kvm_emulate_rdmsr()
1801 kvm_rdx_write(vcpu, (data >> 32) & -1u); in kvm_emulate_rdmsr()
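Note (my annotation): -1u is 0xffffffff, so the two writes above split the 64-bit MSR value across EDX:EAX exactly as RDMSR returns it architecturally, EAX holding bits 31:0 and EDX bits 63:32; the mask on the low half is redundant but harmless.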
1808 u32 ecx = kvm_rcx_read(vcpu); in kvm_emulate_wrmsr() local
1812 r = kvm_set_msr(vcpu, ecx, data); in kvm_emulate_wrmsr()
1815 if (r && kvm_set_msr_user_space(vcpu, ecx, data, r)) in kvm_emulate_wrmsr()
1825 trace_kvm_msr_write_ex(ecx, data); in kvm_emulate_wrmsr()
1830 trace_kvm_msr_write(ecx, data); in kvm_emulate_wrmsr()
1837 return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) || in kvm_vcpu_exit_request()
1844 * i.e. the sending of IPIs; sending the IPI early in the VM-Exit flow reduces
1851 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic)) in handle_fastpath_set_x2apic_icr_irqoff()
1860 kvm_apic_send_ipi(vcpu->arch.apic, (u32)data, (u32)(data >> 32)); in handle_fastpath_set_x2apic_icr_irqoff()
1861 kvm_lapic_set_reg(vcpu->arch.apic, APIC_ICR2, (u32)(data >> 32)); in handle_fastpath_set_x2apic_icr_irqoff()
1862 kvm_lapic_set_reg(vcpu->arch.apic, APIC_ICR, (u32)data); in handle_fastpath_set_x2apic_icr_irqoff()
1951 write_seqcount_begin(&vdata->seq); in update_pvclock_gtod()
1954 vdata->clock.vclock_mode = tk->tkr_mono.clock->vdso_clock_mode; in update_pvclock_gtod()
1955 vdata->clock.cycle_last = tk->tkr_mono.cycle_last; in update_pvclock_gtod()
1956 vdata->clock.mask = tk->tkr_mono.mask; in update_pvclock_gtod()
1957 vdata->clock.mult = tk->tkr_mono.mult; in update_pvclock_gtod()
1958 vdata->clock.shift = tk->tkr_mono.shift; in update_pvclock_gtod()
1959 vdata->clock.base_cycles = tk->tkr_mono.xtime_nsec; in update_pvclock_gtod()
1960 vdata->clock.offset = tk->tkr_mono.base; in update_pvclock_gtod()
1962 vdata->raw_clock.vclock_mode = tk->tkr_raw.clock->vdso_clock_mode; in update_pvclock_gtod()
1963 vdata->raw_clock.cycle_last = tk->tkr_raw.cycle_last; in update_pvclock_gtod()
1964 vdata->raw_clock.mask = tk->tkr_raw.mask; in update_pvclock_gtod()
1965 vdata->raw_clock.mult = tk->tkr_raw.mult; in update_pvclock_gtod()
1966 vdata->raw_clock.shift = tk->tkr_raw.shift; in update_pvclock_gtod()
1967 vdata->raw_clock.base_cycles = tk->tkr_raw.xtime_nsec; in update_pvclock_gtod()
1968 vdata->raw_clock.offset = tk->tkr_raw.base; in update_pvclock_gtod()
1970 vdata->wall_time_sec = tk->xtime_sec; in update_pvclock_gtod()
1972 vdata->offs_boot = tk->offs_boot; in update_pvclock_gtod()
1974 write_seqcount_end(&vdata->seq); in update_pvclock_gtod()
1997 kvm->arch.wall_clock = wall_clock; in kvm_write_wall_clock()
2019 wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm); in kvm_write_wall_clock()
2034 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_write_system_time()
2036 if (vcpu->vcpu_id == 0 && !host_initiated) { in kvm_write_system_time()
2037 if (ka->boot_vcpu_runs_old_kvmclock != old_msr) in kvm_write_system_time()
2040 ka->boot_vcpu_runs_old_kvmclock = old_msr; in kvm_write_system_time()
2043 vcpu->arch.time = system_time; in kvm_write_system_time()
2047 vcpu->arch.pv_time_enabled = false; in kvm_write_system_time()
2051 if (!kvm_gfn_to_hva_cache_init(vcpu->kvm, in kvm_write_system_time()
2052 &vcpu->arch.pv_time, system_time & ~1ULL, in kvm_write_system_time()
2054 vcpu->arch.pv_time_enabled = true; in kvm_write_system_time()
2077 shift--; in kvm_get_time_scale()
2113 vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio; in set_tsc_khz()
2120 vcpu->arch.tsc_catchup = 1; in set_tsc_khz()
2121 vcpu->arch.tsc_always_catchup = 1; in set_tsc_khz()
2125 return -1; in set_tsc_khz()
2129 /* TSC scaling required - calculate ratio */ in set_tsc_khz()
2134 pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n", in set_tsc_khz()
2136 return -1; in set_tsc_khz()
2139 vcpu->arch.tsc_scaling_ratio = ratio; in set_tsc_khz()
2151 vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio; in kvm_set_tsc_khz()
2152 return -1; in kvm_set_tsc_khz()
2156 kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC, in kvm_set_tsc_khz()
2157 &vcpu->arch.virtual_tsc_shift, in kvm_set_tsc_khz()
2158 &vcpu->arch.virtual_tsc_mult); in kvm_set_tsc_khz()
2159 vcpu->arch.virtual_tsc_khz = user_tsc_khz; in kvm_set_tsc_khz()
2167 thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm); in kvm_set_tsc_khz()
2178 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, in compute_guest_tsc()
2179 vcpu->arch.virtual_tsc_mult, in compute_guest_tsc()
2180 vcpu->arch.virtual_tsc_shift); in compute_guest_tsc()
2181 tsc += vcpu->arch.this_tsc_write; in compute_guest_tsc()
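In effect (my annotation): guest_tsc = this_tsc_write + (kernel_ns - this_tsc_nsec) * virtual_tsc_khz * 1000 / NSEC_PER_SEC, with the multiply-and-divide pre-folded into the (virtual_tsc_mult, virtual_tsc_shift) pair by the kvm_get_time_scale() call shown earlier.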
2194 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_track_tsc_matching()
2197 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == in kvm_track_tsc_matching()
2198 atomic_read(&vcpu->kvm->online_vcpus)); in kvm_track_tsc_matching()
2208 if (ka->use_master_clock || in kvm_track_tsc_matching()
2209 (gtod_is_based_on_tsc(gtod->clock.vclock_mode) && vcpus_matched)) in kvm_track_tsc_matching()
2212 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, in kvm_track_tsc_matching()
2213 atomic_read(&vcpu->kvm->online_vcpus), in kvm_track_tsc_matching()
2214 ka->use_master_clock, gtod->clock.vclock_mode); in kvm_track_tsc_matching()
2221 * The most significant 64-N bits (mult) of ratio represent the
2224 * point number (mult + frac * 2^(-N)).
2236 u64 ratio = vcpu->arch.tsc_scaling_ratio; in kvm_scale_tsc()
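A self-contained sketch of the fixed-point multiply the comment above describes (the full source uses mul_u64_u64_shr(); the 128-bit intermediate here is illustrative):

	/* scaled = tsc * ratio / 2^frac_bits, keeping the high product bits */
	static u64 scale_tsc_sketch(u64 tsc, u64 ratio, unsigned int frac_bits)
	{
		unsigned __int128 prod = (unsigned __int128)tsc * ratio;
		return (u64)(prod >> frac_bits);
	}

For example, with 48 fractional bits, a guest clocked at twice the host TSC frequency uses ratio = 2ULL << 48.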
2251 return target_tsc - tsc; in kvm_compute_tsc_offset()
2256 return vcpu->arch.l1_tsc_offset + kvm_scale_tsc(vcpu, host_tsc); in kvm_read_l1_tsc()
2262 vcpu->arch.l1_tsc_offset = offset; in kvm_vcpu_write_tsc_offset()
2263 vcpu->arch.tsc_offset = kvm_x86_ops.write_l1_tsc_offset(vcpu, offset); in kvm_vcpu_write_tsc_offset()
2270 * TSC is marked unstable when we're running on Hyper-V, in kvm_check_tsc_unstable()
2281 struct kvm *kvm = vcpu->kvm; in kvm_synchronize_tsc()
2288 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); in kvm_synchronize_tsc()
2291 elapsed = ns - kvm->arch.last_tsc_nsec; in kvm_synchronize_tsc()
2293 if (vcpu->arch.virtual_tsc_khz) { in kvm_synchronize_tsc()
2296 * detection of vcpu initialization -- need to sync in kvm_synchronize_tsc()
2302 u64 tsc_exp = kvm->arch.last_tsc_write + in kvm_synchronize_tsc()
2304 u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL; in kvm_synchronize_tsc()
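The comparison elided between these lines (my reconstruction, hedged; details may differ from the file): the write counts as a synchronization attempt when it lands within one virtual second of the expected value,

	synchronizing = data < tsc_exp + tsc_hz && data + tsc_hz > tsc_exp;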
2322 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { in kvm_synchronize_tsc()
2324 offset = kvm->arch.cur_tsc_offset; in kvm_synchronize_tsc()
2331 already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation); in kvm_synchronize_tsc()
2340 * These values are tracked in kvm->arch.cur_xxx variables. in kvm_synchronize_tsc()
2342 kvm->arch.cur_tsc_generation++; in kvm_synchronize_tsc()
2343 kvm->arch.cur_tsc_nsec = ns; in kvm_synchronize_tsc()
2344 kvm->arch.cur_tsc_write = data; in kvm_synchronize_tsc()
2345 kvm->arch.cur_tsc_offset = offset; in kvm_synchronize_tsc()
2353 kvm->arch.last_tsc_nsec = ns; in kvm_synchronize_tsc()
2354 kvm->arch.last_tsc_write = data; in kvm_synchronize_tsc()
2355 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; in kvm_synchronize_tsc()
2357 vcpu->arch.last_guest_tsc = data; in kvm_synchronize_tsc()
2360 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; in kvm_synchronize_tsc()
2361 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; in kvm_synchronize_tsc()
2362 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; in kvm_synchronize_tsc()
2365 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); in kvm_synchronize_tsc()
2367 spin_lock(&kvm->arch.pvclock_gtod_sync_lock); in kvm_synchronize_tsc()
2369 kvm->arch.nr_vcpus_matched_tsc = 0; in kvm_synchronize_tsc()
2371 kvm->arch.nr_vcpus_matched_tsc++; in kvm_synchronize_tsc()
2375 spin_unlock(&kvm->arch.pvclock_gtod_sync_lock); in kvm_synchronize_tsc()
2381 u64 tsc_offset = vcpu->arch.l1_tsc_offset; in adjust_tsc_offset_guest()
2387 if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio) in adjust_tsc_offset_host()
2421 switch (clock->vclock_mode) { in vgettsc()
2428 v = (tsc_pg_val - clock->cycle_last) & in vgettsc()
2429 clock->mask; in vgettsc()
2438 v = (*tsc_timestamp - clock->cycle_last) & in vgettsc()
2439 clock->mask; in vgettsc()
2448 return v * clock->mult; in vgettsc()
2459 seq = read_seqcount_begin(&gtod->seq); in do_monotonic_raw()
2460 ns = gtod->raw_clock.base_cycles; in do_monotonic_raw()
2461 ns += vgettsc(&gtod->raw_clock, tsc_timestamp, &mode); in do_monotonic_raw()
2462 ns >>= gtod->raw_clock.shift; in do_monotonic_raw()
2463 ns += ktime_to_ns(ktime_add(gtod->raw_clock.offset, gtod->offs_boot)); in do_monotonic_raw()
2464 } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); in do_monotonic_raw()
2478 seq = read_seqcount_begin(&gtod->seq); in do_realtime()
2479 ts->tv_sec = gtod->wall_time_sec; in do_realtime()
2480 ns = gtod->clock.base_cycles; in do_realtime()
2481 ns += vgettsc(&gtod->clock, tsc_timestamp, &mode); in do_realtime()
2482 ns >>= gtod->clock.shift; in do_realtime()
2483 } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); in do_realtime()
2485 ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); in do_realtime()
2486 ts->tv_nsec = ns; in do_realtime()
2530 * 4. ret0 = timespec0 + (rdtsc - tsc0) |
2531 * 5. | ret1 = timespec1 + (rdtsc - tsc1)
2532 * | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
2536 * - ret0 < ret1
2537 * - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
2539 * - 0 < N - M => M < N
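(My completion of the elided steps: cancelling timespec0 and rdtsc - tsc0 from both sides leaves 0 < N - M, i.e. M < N must hold for monotonicity. Because that cannot be guaranteed for independently sampled time/TSC pairs, KVM exposes a single master copy of the system_timestamp/tsc_timestamp pair, which pvclock_update_vm_gtod_copy() below maintains in lockstep.)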
2558 struct kvm_arch *ka = &kvm->arch; in pvclock_update_vm_gtod_copy()
2562 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == in pvclock_update_vm_gtod_copy()
2563 atomic_read(&kvm->online_vcpus)); in pvclock_update_vm_gtod_copy()
2570 &ka->master_kernel_ns, in pvclock_update_vm_gtod_copy()
2571 &ka->master_cycle_now); in pvclock_update_vm_gtod_copy()
2573 ka->use_master_clock = host_tsc_clocksource && vcpus_matched in pvclock_update_vm_gtod_copy()
2574 && !ka->backwards_tsc_observed in pvclock_update_vm_gtod_copy()
2575 && !ka->boot_vcpu_runs_old_kvmclock; in pvclock_update_vm_gtod_copy()
2577 if (ka->use_master_clock) in pvclock_update_vm_gtod_copy()
2581 trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode, in pvclock_update_vm_gtod_copy()
2596 struct kvm_arch *ka = &kvm->arch; in kvm_gen_update_masterclock()
2598 spin_lock(&ka->pvclock_gtod_sync_lock); in kvm_gen_update_masterclock()
2610 spin_unlock(&ka->pvclock_gtod_sync_lock); in kvm_gen_update_masterclock()
2616 struct kvm_arch *ka = &kvm->arch; in get_kvmclock_ns()
2620 spin_lock(&ka->pvclock_gtod_sync_lock); in get_kvmclock_ns()
2621 if (!ka->use_master_clock) { in get_kvmclock_ns()
2622 spin_unlock(&ka->pvclock_gtod_sync_lock); in get_kvmclock_ns()
2623 return get_kvmclock_base_ns() + ka->kvmclock_offset; in get_kvmclock_ns()
2626 hv_clock.tsc_timestamp = ka->master_cycle_now; in get_kvmclock_ns()
2627 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; in get_kvmclock_ns()
2628 spin_unlock(&ka->pvclock_gtod_sync_lock); in get_kvmclock_ns()
2634 kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, in get_kvmclock_ns()
2639 ret = get_kvmclock_base_ns() + ka->kvmclock_offset; in get_kvmclock_ns()
2648 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_setup_pvclock_page()
2651 if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time, in kvm_setup_pvclock_page()
2666 * and third write. The vcpu->pv_time cache is still valid, because the in kvm_setup_pvclock_page()
2674 vcpu->hv_clock.version = guest_hv_clock.version + 1; in kvm_setup_pvclock_page()
2675 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_setup_pvclock_page()
2676 &vcpu->hv_clock, in kvm_setup_pvclock_page()
2677 sizeof(vcpu->hv_clock.version)); in kvm_setup_pvclock_page()
2682 vcpu->hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); in kvm_setup_pvclock_page()
2684 if (vcpu->pvclock_set_guest_stopped_request) { in kvm_setup_pvclock_page()
2685 vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED; in kvm_setup_pvclock_page()
2686 vcpu->pvclock_set_guest_stopped_request = false; in kvm_setup_pvclock_page()
2689 trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock); in kvm_setup_pvclock_page()
2691 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_setup_pvclock_page()
2692 &vcpu->hv_clock, in kvm_setup_pvclock_page()
2693 sizeof(vcpu->hv_clock)); in kvm_setup_pvclock_page()
2697 vcpu->hv_clock.version++; in kvm_setup_pvclock_page()
2698 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_setup_pvclock_page()
2699 &vcpu->hv_clock, in kvm_setup_pvclock_page()
2700 sizeof(vcpu->hv_clock.version)); in kvm_setup_pvclock_page()
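For context, the guest pairs these version writes with a retry loop; a minimal sketch of that read side (my code, modeled on the pvclock ABI; hv is a hypothetical pointer to the shared pvclock_vcpu_time_info):

	u32 version;
	do {
		version = hv->version;
		rmb();                /* read version before the payload */
		/* ... read tsc_timestamp, system_time, mul, shift ... */
		rmb();                /* read payload before the re-check */
	} while ((version & 1) || version != hv->version);

An odd version means an update is in flight, so the guest retries until it observes the same even value on both sides of the copy, which is exactly what the write sequence above guarantees.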
2706 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_guest_time_update()
2707 struct kvm_arch *ka = &v->kvm->arch; in kvm_guest_time_update()
2720 spin_lock(&ka->pvclock_gtod_sync_lock); in kvm_guest_time_update()
2721 use_master_clock = ka->use_master_clock; in kvm_guest_time_update()
2723 host_tsc = ka->master_cycle_now; in kvm_guest_time_update()
2724 kernel_ns = ka->master_kernel_ns; in kvm_guest_time_update()
2726 spin_unlock(&ka->pvclock_gtod_sync_lock); in kvm_guest_time_update()
2753 if (vcpu->tsc_catchup) { in kvm_guest_time_update()
2756 adjust_tsc_offset_guest(v, tsc - tsc_timestamp); in kvm_guest_time_update()
2768 if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) { in kvm_guest_time_update()
2769 kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL, in kvm_guest_time_update()
2770 &vcpu->hv_clock.tsc_shift, in kvm_guest_time_update()
2771 &vcpu->hv_clock.tsc_to_system_mul); in kvm_guest_time_update()
2772 vcpu->hw_tsc_khz = tgt_tsc_khz; in kvm_guest_time_update()
2775 vcpu->hv_clock.tsc_timestamp = tsc_timestamp; in kvm_guest_time_update()
2776 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; in kvm_guest_time_update()
2777 vcpu->last_guest_tsc = tsc_timestamp; in kvm_guest_time_update()
2784 vcpu->hv_clock.flags = pvclock_flags; in kvm_guest_time_update()
2786 if (vcpu->pv_time_enabled) in kvm_guest_time_update()
2788 if (v == kvm_get_vcpu(v->kvm, 0)) in kvm_guest_time_update()
2789 kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock); in kvm_guest_time_update()
2795 * vcpu->cpu migration, should not allow system_timestamp from
2801 * We need to rate-limit these requests though, as they can
2804 * by the delay we use to rate-limit the updates.
2826 struct kvm *kvm = v->kvm; in kvm_gen_kvmclock_update()
2829 schedule_delayed_work(&kvm->arch.kvmclock_update_work, in kvm_gen_kvmclock_update()
2845 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0); in kvmclock_sync_fn()
2846 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, in kvmclock_sync_fn()
2857 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18)); in can_set_mci_status()
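Context (my annotation): BIT_ULL(18) in MSR_K7_HWCR is AMD's McStatusWrEn bit; once the guest has set it, non-zero writes to the MCi_STATUS banks are architecturally permitted, which is what can_set_mci_status() reports to set_msr_mce() below.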
2864 u64 mcg_cap = vcpu->arch.mcg_cap; in set_msr_mce()
2866 u32 msr = msr_info->index; in set_msr_mce()
2867 u64 data = msr_info->data; in set_msr_mce()
2871 vcpu->arch.mcg_status = data; in set_msr_mce()
2875 (data || !msr_info->host_initiated)) in set_msr_mce()
2879 vcpu->arch.mcg_ctl = data; in set_msr_mce()
2885 msr - MSR_IA32_MC0_CTL, in set_msr_mce()
2886 MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL); in set_msr_mce()
2894 * correctable, single-bit ECC data errors. in set_msr_mce()
2901 if (!msr_info->host_initiated && in set_msr_mce()
2907 vcpu->arch.mce_banks[offset] = data; in set_msr_mce()
2917 struct kvm *kvm = vcpu->kvm; in xen_hvm_config()
2919 u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64 in xen_hvm_config()
2920 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32; in xen_hvm_config()
2921 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64 in xen_hvm_config()
2922 : kvm->arch.xen_hvm_config.blob_size_32; in xen_hvm_config()
2945 return (vcpu->arch.apf.msr_en_val & mask) == mask; in kvm_pv_async_pf_enabled()
2967 vcpu->arch.apf.msr_en_val = data; in kvm_pv_enable_async_pf()
2975 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, in kvm_pv_enable_async_pf()
2979 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); in kvm_pv_enable_async_pf()
2980 vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; in kvm_pv_enable_async_pf()
2989 /* Bits 8-63 are reserved */ in kvm_pv_enable_async_pf_int()
2996 vcpu->arch.apf.msr_int_val = data; in kvm_pv_enable_async_pf_int()
2998 vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK; in kvm_pv_enable_async_pf_int()
3005 vcpu->arch.pv_time_enabled = false; in kvmclock_reset()
3006 vcpu->arch.time = 0; in kvmclock_reset()
3011 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb_all()
3017 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb_guest()
3026 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in record_steal_time()
3029 /* -EAGAIN is returned in atomic context so we can just return. */ in record_steal_time()
3030 if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, in record_steal_time()
3031 &map, &vcpu->arch.st.cache, false)) in record_steal_time()
3035 offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS); in record_steal_time()
3042 trace_kvm_pv_tlb_flush(vcpu->vcpu_id, in record_steal_time()
3043 st->preempted & KVM_VCPU_FLUSH_TLB); in record_steal_time()
3044 if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB) in record_steal_time()
3047 st->preempted = 0; in record_steal_time()
3050 vcpu->arch.st.preempted = 0; in record_steal_time()
3052 if (st->version & 1) in record_steal_time()
3053 st->version += 1; /* first time write, random junk */ in record_steal_time()
3055 st->version += 1; in record_steal_time()
3059 st->steal += current->sched_info.run_delay - in record_steal_time()
3060 vcpu->arch.st.last_steal; in record_steal_time()
3061 vcpu->arch.st.last_steal = current->sched_info.run_delay; in record_steal_time()
3065 st->version += 1; in record_steal_time()
3067 kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false); in record_steal_time()
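The version dance above is the same even/odd protocol as the pvclock page: the value is made odd before steal/preempted are touched and even again afterwards, letting the guest's kvm_steal_clock() retry around a torn update.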
3073 u32 msr = msr_info->index; in kvm_set_msr_common()
3074 u64 data = msr_info->data; in kvm_set_msr_common()
3087 if (msr_info->host_initiated) in kvm_set_msr_common()
3088 vcpu->arch.microcode_version = data; in kvm_set_msr_common()
3091 if (!msr_info->host_initiated) in kvm_set_msr_common()
3093 vcpu->arch.arch_capabilities = data; in kvm_set_msr_common()
3098 if (!msr_info->host_initiated) in kvm_set_msr_common()
3105 vcpu->arch.perf_capabilities = data; in kvm_set_msr_common()
3118 vcpu->arch.msr_hwcr = data; in kvm_set_msr_common()
3134 /* We support the non-activated case already */ in kvm_set_msr_common()
3137 /* Values other than LBR and BTF are vendor-specific, in kvm_set_msr_common()
3155 if (!msr_info->host_initiated) { in kvm_set_msr_common()
3156 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; in kvm_set_msr_common()
3163 vcpu->arch.ia32_tsc_adjust_msr = data; in kvm_set_msr_common()
3167 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) && in kvm_set_msr_common()
3168 ((vcpu->arch.ia32_misc_enable_msr ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) { in kvm_set_msr_common()
3171 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
3174 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
3178 if (!msr_info->host_initiated) in kvm_set_msr_common()
3180 vcpu->arch.smbase = data; in kvm_set_msr_common()
3183 vcpu->arch.msr_ia32_power_ctl = data; in kvm_set_msr_common()
3186 if (msr_info->host_initiated) { in kvm_set_msr_common()
3189 u64 adj = kvm_compute_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset; in kvm_set_msr_common()
3191 vcpu->arch.ia32_tsc_adjust_msr += adj; in kvm_set_msr_common()
3195 if (!msr_info->host_initiated && in kvm_set_msr_common()
3205 vcpu->arch.ia32_xss = data; in kvm_set_msr_common()
3209 if (!msr_info->host_initiated) in kvm_set_msr_common()
3211 vcpu->arch.smi_count = data; in kvm_set_msr_common()
3217 kvm_write_wall_clock(vcpu->kvm, data); in kvm_set_msr_common()
3223 kvm_write_wall_clock(vcpu->kvm, data); in kvm_set_msr_common()
3229 kvm_write_system_time(vcpu, data, false, msr_info->host_initiated); in kvm_set_msr_common()
3235 kvm_write_system_time(vcpu, data, true, msr_info->host_initiated); in kvm_set_msr_common()
3255 vcpu->arch.apf.pageready_pending = false; in kvm_set_msr_common()
3269 vcpu->arch.st.msr_val = data; in kvm_set_msr_common()
3290 if (data & (-1ULL << 1)) in kvm_set_msr_common()
3293 vcpu->arch.msr_kvm_poll_control = data; in kvm_set_msr_common()
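Note (my annotation): -1ULL << 1 is ~1ULL, so any set bit of MSR_KVM_POLL_CONTROL above bit 0 (the only defined bit) is rejected as reserved.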
3298 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: in kvm_set_msr_common()
3318 * all pre-dating SVM, but a recommended workaround from in kvm_set_msr_common()
3334 msr_info->host_initiated); in kvm_set_msr_common()
3336 /* Drop writes to this legacy MSR -- see rdmsr in kvm_set_msr_common()
3346 vcpu->arch.osvw.length = data; in kvm_set_msr_common()
3351 vcpu->arch.osvw.status = data; in kvm_set_msr_common()
3354 if (!msr_info->host_initiated || in kvm_set_msr_common()
3358 vcpu->arch.msr_platform_info = data; in kvm_set_msr_common()
3365 vcpu->arch.msr_misc_features_enables = data; in kvm_set_msr_common()
3368 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) in kvm_set_msr_common()
3381 u64 mcg_cap = vcpu->arch.mcg_cap; in get_msr_mce()
3390 data = vcpu->arch.mcg_cap; in get_msr_mce()
3395 data = vcpu->arch.mcg_ctl; in get_msr_mce()
3398 data = vcpu->arch.mcg_status; in get_msr_mce()
3404 msr - MSR_IA32_MC0_CTL, in get_msr_mce()
3405 MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL); in get_msr_mce()
3407 data = vcpu->arch.mce_banks[offset]; in get_msr_mce()
3418 switch (msr_info->index) { in kvm_get_msr_common()
3441 * so for existing CPU-specific MSRs. in kvm_get_msr_common()
3448 msr_info->data = 0; in kvm_get_msr_common()
3455 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
3457 msr_info->data = 0; in kvm_get_msr_common()
3460 msr_info->data = vcpu->arch.microcode_version; in kvm_get_msr_common()
3463 if (!msr_info->host_initiated && in kvm_get_msr_common()
3466 msr_info->data = vcpu->arch.arch_capabilities; in kvm_get_msr_common()
3469 if (!msr_info->host_initiated && in kvm_get_msr_common()
3472 msr_info->data = vcpu->arch.perf_capabilities; in kvm_get_msr_common()
3475 msr_info->data = vcpu->arch.msr_ia32_power_ctl; in kvm_get_msr_common()
3484 * return L1's TSC value to ensure backwards-compatible in kvm_get_msr_common()
3487 u64 tsc_offset = msr_info->host_initiated ? vcpu->arch.l1_tsc_offset : in kvm_get_msr_common()
3488 vcpu->arch.tsc_offset; in kvm_get_msr_common()
3490 msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + tsc_offset; in kvm_get_msr_common()
3495 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
3497 msr_info->data = 3; in kvm_get_msr_common()
3511 msr_info->data = 1 << 24; in kvm_get_msr_common()
3514 msr_info->data = kvm_get_apic_base(vcpu); in kvm_get_msr_common()
3517 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
3519 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu); in kvm_get_msr_common()
3522 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr; in kvm_get_msr_common()
3525 msr_info->data = vcpu->arch.ia32_misc_enable_msr; in kvm_get_msr_common()
3528 if (!msr_info->host_initiated) in kvm_get_msr_common()
3530 msr_info->data = vcpu->arch.smbase; in kvm_get_msr_common()
3533 msr_info->data = vcpu->arch.smi_count; in kvm_get_msr_common()
3537 msr_info->data = 1000ULL; in kvm_get_msr_common()
3539 msr_info->data |= (((uint64_t)4ULL) << 40); in kvm_get_msr_common()
3542 msr_info->data = vcpu->arch.efer; in kvm_get_msr_common()
3548 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
3554 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
3560 msr_info->data = vcpu->arch.time; in kvm_get_msr_common()
3566 msr_info->data = vcpu->arch.time; in kvm_get_msr_common()
3572 msr_info->data = vcpu->arch.apf.msr_en_val; in kvm_get_msr_common()
3578 msr_info->data = vcpu->arch.apf.msr_int_val; in kvm_get_msr_common()
3584 msr_info->data = 0; in kvm_get_msr_common()
3590 msr_info->data = vcpu->arch.st.msr_val; in kvm_get_msr_common()
3596 msr_info->data = vcpu->arch.pv_eoi.msr_val; in kvm_get_msr_common()
3602 msr_info->data = vcpu->arch.msr_kvm_poll_control; in kvm_get_msr_common()
3609 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: in kvm_get_msr_common()
3610 return get_msr_mce(vcpu, msr_info->index, &msr_info->data, in kvm_get_msr_common()
3611 msr_info->host_initiated); in kvm_get_msr_common()
3613 if (!msr_info->host_initiated && in kvm_get_msr_common()
3616 msr_info->data = vcpu->arch.ia32_xss; in kvm_get_msr_common()
3620 * Provide expected ramp-up count for K7. All other in kvm_get_msr_common()
3628 msr_info->data = 0x20000000; in kvm_get_msr_common()
3640 msr_info->index, &msr_info->data, in kvm_get_msr_common()
3641 msr_info->host_initiated); in kvm_get_msr_common()
3653 msr_info->data = 0xbe702111; in kvm_get_msr_common()
3658 msr_info->data = vcpu->arch.osvw.length; in kvm_get_msr_common()
3663 msr_info->data = vcpu->arch.osvw.status; in kvm_get_msr_common()
3666 if (!msr_info->host_initiated && in kvm_get_msr_common()
3667 !vcpu->kvm->arch.guest_can_read_msr_platform_info) in kvm_get_msr_common()
3669 msr_info->data = vcpu->arch.msr_platform_info; in kvm_get_msr_common()
3672 msr_info->data = vcpu->arch.msr_misc_features_enables; in kvm_get_msr_common()
3675 msr_info->data = vcpu->arch.msr_hwcr; in kvm_get_msr_common()
3678 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
3698 for (i = 0; i < msrs->nmsrs; ++i) in __msr_io()
3720 r = -EFAULT; in msr_io()
3724 r = -E2BIG; in msr_io()
3729 entries = memdup_user(user_msrs->entries, size); in msr_io()
3739 r = -EFAULT; in msr_io()
3740 if (writeback && copy_to_user(user_msrs->entries, entries, size)) in msr_io()
3872 r = kvm_x86_ops.nested_ops->get_state ? in kvm_vm_ioctl_check_extension()
3873 kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0; in kvm_vm_ioctl_check_extension()
3879 r = kvm_x86_ops.nested_ops->enable_evmcs != NULL; in kvm_vm_ioctl_check_extension()
3906 r = -EFAULT; in kvm_arch_dev_ioctl()
3913 r = -E2BIG; in kvm_arch_dev_ioctl()
3916 r = -EFAULT; in kvm_arch_dev_ioctl()
3917 if (copy_to_user(user_msr_list->indices, &msrs_to_save, in kvm_arch_dev_ioctl()
3920 if (copy_to_user(user_msr_list->indices + num_msrs_to_save, in kvm_arch_dev_ioctl()
3932 r = -EFAULT; in kvm_arch_dev_ioctl()
3936 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, in kvm_arch_dev_ioctl()
3941 r = -EFAULT; in kvm_arch_dev_ioctl()
3948 r = -EFAULT; in kvm_arch_dev_ioctl()
3959 r = -EFAULT; in kvm_arch_dev_ioctl()
3966 r = -E2BIG; in kvm_arch_dev_ioctl()
3969 r = -EFAULT; in kvm_arch_dev_ioctl()
3970 if (copy_to_user(user_msr_list->indices, &msr_based_features, in kvm_arch_dev_ioctl()
3980 r = -EINVAL; in kvm_arch_dev_ioctl()
3994 return kvm_arch_has_noncoherent_dma(vcpu->kvm); in need_emulate_wbinvd()
4002 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_load()
4003 else if (vcpu->cpu != -1 && vcpu->cpu != cpu) in kvm_arch_vcpu_load()
4004 smp_call_function_single(vcpu->cpu, in kvm_arch_vcpu_load()
4011 vcpu->arch.host_pkru = read_pkru(); in kvm_arch_vcpu_load()
4014 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { in kvm_arch_vcpu_load()
4015 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); in kvm_arch_vcpu_load()
4016 vcpu->arch.tsc_offset_adjustment = 0; in kvm_arch_vcpu_load()
4020 if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) { in kvm_arch_vcpu_load()
4021 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : in kvm_arch_vcpu_load()
4022 rdtsc() - vcpu->arch.last_host_tsc; in kvm_arch_vcpu_load()
4028 vcpu->arch.last_guest_tsc); in kvm_arch_vcpu_load()
4030 vcpu->arch.tsc_catchup = 1; in kvm_arch_vcpu_load()
4038 * kvmclock on vcpu->cpu migration in kvm_arch_vcpu_load()
4040 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) in kvm_arch_vcpu_load()
4042 if (vcpu->cpu != cpu) in kvm_arch_vcpu_load()
4044 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
4055 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in kvm_steal_time_set_preempted()
4058 if (vcpu->arch.st.preempted) in kvm_steal_time_set_preempted()
4061 if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map, in kvm_steal_time_set_preempted()
4062 &vcpu->arch.st.cache, true)) in kvm_steal_time_set_preempted()
4066 offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS); in kvm_steal_time_set_preempted()
4068 st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; in kvm_steal_time_set_preempted()
4070 kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true); in kvm_steal_time_set_preempted()
4077 if (vcpu->preempted) in kvm_arch_vcpu_put()
4078 vcpu->arch.preempted_in_kernel = !kvm_x86_ops.get_cpl(vcpu); in kvm_arch_vcpu_put()
4093 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_put()
4095 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_put()
4098 vcpu->arch.last_host_tsc = rdtsc(); in kvm_arch_vcpu_put()
4110 if (vcpu->arch.apicv_active) in kvm_vcpu_ioctl_get_lapic()
4152 * instruction boundary and with no events half-injected. in kvm_vcpu_ready_for_interrupt_injection()
4157 !vcpu->arch.exception.pending); in kvm_vcpu_ready_for_interrupt_injection()
4163 if (irq->irq >= KVM_NR_INTERRUPTS) in kvm_vcpu_ioctl_interrupt()
4164 return -EINVAL; in kvm_vcpu_ioctl_interrupt()
4166 if (!irqchip_in_kernel(vcpu->kvm)) { in kvm_vcpu_ioctl_interrupt()
4167 kvm_queue_interrupt(vcpu, irq->irq, false); in kvm_vcpu_ioctl_interrupt()
4173 * With in-kernel LAPIC, we only use this to inject EXTINT, so in kvm_vcpu_ioctl_interrupt()
4174 * fail for in-kernel 8259. in kvm_vcpu_ioctl_interrupt()
4176 if (pic_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_interrupt()
4177 return -ENXIO; in kvm_vcpu_ioctl_interrupt()
4179 if (vcpu->arch.pending_external_vector != -1) in kvm_vcpu_ioctl_interrupt()
4180 return -EEXIST; in kvm_vcpu_ioctl_interrupt()
4182 vcpu->arch.pending_external_vector = irq->irq; in kvm_vcpu_ioctl_interrupt()
4204 if (tac->flags) in vcpu_ioctl_tpr_access_reporting()
4205 return -EINVAL; in vcpu_ioctl_tpr_access_reporting()
4206 vcpu->arch.tpr_access_reporting = !!tac->enabled; in vcpu_ioctl_tpr_access_reporting()
4216 r = -EINVAL; in kvm_vcpu_ioctl_x86_setup_mce()
4222 vcpu->arch.mcg_cap = mcg_cap; in kvm_vcpu_ioctl_x86_setup_mce()
4225 vcpu->arch.mcg_ctl = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
4228 vcpu->arch.mce_banks[bank*4] = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
4238 u64 mcg_cap = vcpu->arch.mcg_cap; in kvm_vcpu_ioctl_x86_set_mce()
4240 u64 *banks = vcpu->arch.mce_banks; in kvm_vcpu_ioctl_x86_set_mce()
4242 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL)) in kvm_vcpu_ioctl_x86_set_mce()
4243 return -EINVAL; in kvm_vcpu_ioctl_x86_set_mce()
4248 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && in kvm_vcpu_ioctl_x86_set_mce()
4249 vcpu->arch.mcg_ctl != ~(u64)0) in kvm_vcpu_ioctl_x86_set_mce()
4251 banks += 4 * mce->bank; in kvm_vcpu_ioctl_x86_set_mce()
4256 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0) in kvm_vcpu_ioctl_x86_set_mce()
4258 if (mce->status & MCI_STATUS_UC) { in kvm_vcpu_ioctl_x86_set_mce()
4259 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || in kvm_vcpu_ioctl_x86_set_mce()
4265 mce->status |= MCI_STATUS_OVER; in kvm_vcpu_ioctl_x86_set_mce()
4266 banks[2] = mce->addr; in kvm_vcpu_ioctl_x86_set_mce()
4267 banks[3] = mce->misc; in kvm_vcpu_ioctl_x86_set_mce()
4268 vcpu->arch.mcg_status = mce->mcg_status; in kvm_vcpu_ioctl_x86_set_mce()
4269 banks[1] = mce->status; in kvm_vcpu_ioctl_x86_set_mce()
4274 mce->status |= MCI_STATUS_OVER; in kvm_vcpu_ioctl_x86_set_mce()
4275 banks[2] = mce->addr; in kvm_vcpu_ioctl_x86_set_mce()
4276 banks[3] = mce->misc; in kvm_vcpu_ioctl_x86_set_mce()
4277 banks[1] = mce->status; in kvm_vcpu_ioctl_x86_set_mce()
4295 * modified under nVMX). Unless the per-VM capability, in kvm_vcpu_ioctl_x86_get_vcpu_events()
4302 if (!vcpu->kvm->arch.exception_payload_enabled && in kvm_vcpu_ioctl_x86_get_vcpu_events()
4303 vcpu->arch.exception.pending && vcpu->arch.exception.has_payload) in kvm_vcpu_ioctl_x86_get_vcpu_events()
4312 if (kvm_exception_is_soft(vcpu->arch.exception.nr)) { in kvm_vcpu_ioctl_x86_get_vcpu_events()
4313 events->exception.injected = 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4314 events->exception.pending = 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4316 events->exception.injected = vcpu->arch.exception.injected; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4317 events->exception.pending = vcpu->arch.exception.pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4323 if (!vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_get_vcpu_events()
4324 events->exception.injected |= in kvm_vcpu_ioctl_x86_get_vcpu_events()
4325 vcpu->arch.exception.pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4327 events->exception.nr = vcpu->arch.exception.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4328 events->exception.has_error_code = vcpu->arch.exception.has_error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4329 events->exception.error_code = vcpu->arch.exception.error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4330 events->exception_has_payload = vcpu->arch.exception.has_payload; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4331 events->exception_payload = vcpu->arch.exception.payload; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4333 events->interrupt.injected = in kvm_vcpu_ioctl_x86_get_vcpu_events()
4334 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4335 events->interrupt.nr = vcpu->arch.interrupt.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4336 events->interrupt.soft = 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4337 events->interrupt.shadow = kvm_x86_ops.get_interrupt_shadow(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4339 events->nmi.injected = vcpu->arch.nmi_injected; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4340 events->nmi.pending = vcpu->arch.nmi_pending != 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4341 events->nmi.masked = kvm_x86_ops.get_nmi_mask(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4342 events->nmi.pad = 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4344 events->sipi_vector = 0; /* never valid when reporting to user space */ in kvm_vcpu_ioctl_x86_get_vcpu_events()
4346 events->smi.smm = is_smm(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4347 events->smi.pending = vcpu->arch.smi_pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4348 events->smi.smm_inside_nmi = in kvm_vcpu_ioctl_x86_get_vcpu_events()
4349 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4350 events->smi.latched_init = kvm_lapic_latched_init(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4352 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING in kvm_vcpu_ioctl_x86_get_vcpu_events()
4355 if (vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_get_vcpu_events()
4356 events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4358 memset(&events->reserved, 0, sizeof(events->reserved)); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4366 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING in kvm_vcpu_ioctl_x86_set_vcpu_events()
4371 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4373 if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
4374 if (!vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4375 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4376 if (events->exception.pending) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4377 events->exception.injected = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4379 events->exception_has_payload = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4381 events->exception.pending = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4382 events->exception_has_payload = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4385 if ((events->exception.injected || events->exception.pending) && in kvm_vcpu_ioctl_x86_set_vcpu_events()
4386 (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR)) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4387 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4390 if (events->flags & KVM_VCPUEVENT_VALID_SMM && in kvm_vcpu_ioctl_x86_set_vcpu_events()
4391 (events->smi.smm || events->smi.pending) && in kvm_vcpu_ioctl_x86_set_vcpu_events()
4392 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4393 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4396 vcpu->arch.exception.injected = events->exception.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4397 vcpu->arch.exception.pending = events->exception.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4398 vcpu->arch.exception.nr = events->exception.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4399 vcpu->arch.exception.has_error_code = events->exception.has_error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4400 vcpu->arch.exception.error_code = events->exception.error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4401 vcpu->arch.exception.has_payload = events->exception_has_payload; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4402 vcpu->arch.exception.payload = events->exception_payload; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4404 vcpu->arch.interrupt.injected = events->interrupt.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4405 vcpu->arch.interrupt.nr = events->interrupt.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4406 vcpu->arch.interrupt.soft = events->interrupt.soft; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4407 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4409 events->interrupt.shadow); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4411 vcpu->arch.nmi_injected = events->nmi.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4412 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4413 vcpu->arch.nmi_pending = events->nmi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4414 kvm_x86_ops.set_nmi_mask(vcpu, events->nmi.masked); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4416 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && in kvm_vcpu_ioctl_x86_set_vcpu_events()
4418 vcpu->arch.apic->sipi_vector = events->sipi_vector; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4420 if (events->flags & KVM_VCPUEVENT_VALID_SMM) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
4421 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
4422 if (events->smi.smm) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4423 vcpu->arch.hflags |= HF_SMM_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4425 vcpu->arch.hflags &= ~HF_SMM_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4427 kvm_x86_ops.nested_ops->leave_nested(vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4431 vcpu->arch.smi_pending = events->smi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4433 if (events->smi.smm) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
4434 if (events->smi.smm_inside_nmi) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4435 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4437 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4441 if (events->smi.latched_init) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4442 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4444 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4458 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_get_debugregs()
4460 dbgregs->dr6 = val; in kvm_vcpu_ioctl_x86_get_debugregs()
4461 dbgregs->dr7 = vcpu->arch.dr7; in kvm_vcpu_ioctl_x86_get_debugregs()
4462 dbgregs->flags = 0; in kvm_vcpu_ioctl_x86_get_debugregs()
4463 memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); in kvm_vcpu_ioctl_x86_get_debugregs()
4469 if (dbgregs->flags) in kvm_vcpu_ioctl_x86_set_debugregs()
4470 return -EINVAL; in kvm_vcpu_ioctl_x86_set_debugregs()
4472 if (dbgregs->dr6 & ~0xffffffffull) in kvm_vcpu_ioctl_x86_set_debugregs()
4473 return -EINVAL; in kvm_vcpu_ioctl_x86_set_debugregs()
4474 if (dbgregs->dr7 & ~0xffffffffull) in kvm_vcpu_ioctl_x86_set_debugregs()
4475 return -EINVAL; in kvm_vcpu_ioctl_x86_set_debugregs()
4477 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_set_debugregs()
4479 vcpu->arch.dr6 = dbgregs->dr6; in kvm_vcpu_ioctl_x86_set_debugregs()
4480 vcpu->arch.dr7 = dbgregs->dr7; in kvm_vcpu_ioctl_x86_set_debugregs()
4490 struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave; in fill_xsave()
4491 u64 xstate_bv = xsave->header.xfeatures; in fill_xsave()
4501 xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE; in fill_xsave()
4506 * non-compacted offset. in fill_xsave()
4510 u64 xfeature_mask = valid & -valid; in fill_xsave()
4511 int xfeature_nr = fls64(xfeature_mask) - 1; in fill_xsave()
4515 u32 size, offset, ecx, edx; in fill_xsave() local
4517 &size, &offset, &ecx, &edx); in fill_xsave()
4519 memcpy(dest + offset, &vcpu->arch.pkru, in fill_xsave()
4520 sizeof(vcpu->arch.pkru)); in fill_xsave()
4526 valid -= xfeature_mask; in fill_xsave()
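Note (my annotation): valid & -valid isolates the lowest set bit and fls64() - 1 converts it to the XSAVE feature number, so the loop walks the feature regions lowest-to-highest; e.g. valid = 0x14 gives xfeature_mask = 0x4 and xfeature_nr = 2 (YMM).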
4532 struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave; in load_xsave()
4543 xsave->header.xfeatures = xstate_bv; in load_xsave()
4545 xsave->header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED; in load_xsave()
4548 * Copy each region from the non-compacted offset to the in load_xsave()
4553 u64 xfeature_mask = valid & -valid; in load_xsave()
4554 int xfeature_nr = fls64(xfeature_mask) - 1; in load_xsave()
4558 u32 size, offset, ecx, edx; in load_xsave() local
4560 &size, &offset, &ecx, &edx); in load_xsave()
4562 memcpy(&vcpu->arch.pkru, src + offset, in load_xsave()
4563 sizeof(vcpu->arch.pkru)); in load_xsave()
4568 valid -= xfeature_mask; in load_xsave()
4577 fill_xsave((u8 *) guest_xsave->region, vcpu); in kvm_vcpu_ioctl_x86_get_xsave()
4579 memcpy(guest_xsave->region, in kvm_vcpu_ioctl_x86_get_xsave()
4580 &vcpu->arch.guest_fpu->state.fxsave, in kvm_vcpu_ioctl_x86_get_xsave()
4582 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] = in kvm_vcpu_ioctl_x86_get_xsave()
4593 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; in kvm_vcpu_ioctl_x86_set_xsave()
4594 u32 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)]; in kvm_vcpu_ioctl_x86_set_xsave()
4603 return -EINVAL; in kvm_vcpu_ioctl_x86_set_xsave()
4604 load_xsave(vcpu, (u8 *)guest_xsave->region); in kvm_vcpu_ioctl_x86_set_xsave()
4608 return -EINVAL; in kvm_vcpu_ioctl_x86_set_xsave()
4609 memcpy(&vcpu->arch.guest_fpu->state.fxsave, in kvm_vcpu_ioctl_x86_set_xsave()
4610 guest_xsave->region, sizeof(struct fxregs_state)); in kvm_vcpu_ioctl_x86_set_xsave()
4619 guest_xcrs->nr_xcrs = 0; in kvm_vcpu_ioctl_x86_get_xcrs()
4623 guest_xcrs->nr_xcrs = 1; in kvm_vcpu_ioctl_x86_get_xcrs()
4624 guest_xcrs->flags = 0; in kvm_vcpu_ioctl_x86_get_xcrs()
4625 guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK; in kvm_vcpu_ioctl_x86_get_xcrs()
4626 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; in kvm_vcpu_ioctl_x86_get_xcrs()
4635 return -EINVAL; in kvm_vcpu_ioctl_x86_set_xcrs()
4637 if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) in kvm_vcpu_ioctl_x86_set_xcrs()
4638 return -EINVAL; in kvm_vcpu_ioctl_x86_set_xcrs()
4640 for (i = 0; i < guest_xcrs->nr_xcrs; i++) in kvm_vcpu_ioctl_x86_set_xcrs()
4642 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) { in kvm_vcpu_ioctl_x86_set_xcrs()
4644 guest_xcrs->xcrs[i].value); in kvm_vcpu_ioctl_x86_set_xcrs()
4648 r = -EINVAL; in kvm_vcpu_ioctl_x86_set_xcrs()
4660 if (!vcpu->arch.pv_time_enabled) in kvm_set_guest_paused()
4661 return -EINVAL; in kvm_set_guest_paused()
4662 vcpu->arch.pvclock_set_guest_stopped_request = true; in kvm_set_guest_paused()
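/*
 * Hedged userspace sketch: kvm_set_guest_paused() is reached via the
 * KVM_KVMCLOCK_CTRL vcpu ioctl, which a VMM issues after pausing a
 * guest so the soft lockup watchdog knows the stall was host-induced.
 * vcpu_fd is assumed to be an open KVM vCPU file descriptor.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int mark_guest_paused(int vcpu_fd)
{
	/* fails with EINVAL if the guest registered no pvclock area */
	return ioctl(vcpu_fd, KVM_KVMCLOCK_CTRL, 0);
}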
4674 if (cap->flags) in kvm_vcpu_ioctl_enable_cap()
4675 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
4677 switch (cap->cap) { in kvm_vcpu_ioctl_enable_cap()
4679 if (cap->args[0]) in kvm_vcpu_ioctl_enable_cap()
4680 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
4684 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_enable_cap()
4685 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
4686 return kvm_hv_activate_synic(vcpu, cap->cap == in kvm_vcpu_ioctl_enable_cap()
4689 if (!kvm_x86_ops.nested_ops->enable_evmcs) in kvm_vcpu_ioctl_enable_cap()
4690 return -ENOTTY; in kvm_vcpu_ioctl_enable_cap()
4691 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version); in kvm_vcpu_ioctl_enable_cap()
4693 user_ptr = (void __user *)(uintptr_t)cap->args[0]; in kvm_vcpu_ioctl_enable_cap()
4696 r = -EFAULT; in kvm_vcpu_ioctl_enable_cap()
4701 return -ENOTTY; in kvm_vcpu_ioctl_enable_cap()
4706 vcpu->arch.pv_cpuid.enforce = cap->args[0]; in kvm_vcpu_ioctl_enable_cap()
4707 if (vcpu->arch.pv_cpuid.enforce) in kvm_vcpu_ioctl_enable_cap()
4713 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
4720 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
4735 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4741 r = -ENOMEM; in kvm_arch_vcpu_ioctl()
4747 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4754 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4769 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4787 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4790 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
4797 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4801 cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
4808 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4812 cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
4815 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4822 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
4824 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
4828 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
4830 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
4836 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4842 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4852 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4855 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4858 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
4860 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
4866 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4875 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4886 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4895 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4907 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4917 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4927 r = -ENOMEM; in kvm_arch_vcpu_ioctl()
4933 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4951 r = -ENOMEM; in kvm_arch_vcpu_ioctl()
4957 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4977 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4993 r = vcpu->arch.virtual_tsc_khz; in kvm_arch_vcpu_ioctl()
5003 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5013 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5014 if (!kvm_x86_ops.nested_ops->get_state) in kvm_arch_vcpu_ioctl()
5017 BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size)); in kvm_arch_vcpu_ioctl()
5018 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5019 if (get_user(user_data_size, &user_kvm_nested_state->size)) in kvm_arch_vcpu_ioctl()
5022 r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state, in kvm_arch_vcpu_ioctl()
5028 if (put_user(r, &user_kvm_nested_state->size)) in kvm_arch_vcpu_ioctl()
5029 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5031 r = -E2BIG; in kvm_arch_vcpu_ioctl()
5043 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5044 if (!kvm_x86_ops.nested_ops->set_state) in kvm_arch_vcpu_ioctl()
5047 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5051 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5066 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5067 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state); in kvm_arch_vcpu_ioctl()
5068 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5075 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5080 cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
5084 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5091 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5109 if (addr > (unsigned int)(-3 * PAGE_SIZE)) in kvm_vm_ioctl_set_tss_addr()
5110 return -EINVAL; in kvm_vm_ioctl_set_tss_addr()
5125 return -EINVAL; in kvm_vm_ioctl_set_nr_mmu_pages()
5127 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_set_nr_mmu_pages()
5130 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; in kvm_vm_ioctl_set_nr_mmu_pages()
5132 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_set_nr_mmu_pages()
5138 return kvm->arch.n_max_mmu_pages; in kvm_vm_ioctl_get_nr_mmu_pages()
5143 struct kvm_pic *pic = kvm->arch.vpic; in kvm_vm_ioctl_get_irqchip()
5147 switch (chip->chip_id) { in kvm_vm_ioctl_get_irqchip()
5149 memcpy(&chip->chip.pic, &pic->pics[0], in kvm_vm_ioctl_get_irqchip()
5153 memcpy(&chip->chip.pic, &pic->pics[1], in kvm_vm_ioctl_get_irqchip()
5157 kvm_get_ioapic(kvm, &chip->chip.ioapic); in kvm_vm_ioctl_get_irqchip()
5160 r = -EINVAL; in kvm_vm_ioctl_get_irqchip()
5168 struct kvm_pic *pic = kvm->arch.vpic; in kvm_vm_ioctl_set_irqchip()
5172 switch (chip->chip_id) { in kvm_vm_ioctl_set_irqchip()
5174 spin_lock(&pic->lock); in kvm_vm_ioctl_set_irqchip()
5175 memcpy(&pic->pics[0], &chip->chip.pic, in kvm_vm_ioctl_set_irqchip()
5177 spin_unlock(&pic->lock); in kvm_vm_ioctl_set_irqchip()
5180 spin_lock(&pic->lock); in kvm_vm_ioctl_set_irqchip()
5181 memcpy(&pic->pics[1], &chip->chip.pic, in kvm_vm_ioctl_set_irqchip()
5183 spin_unlock(&pic->lock); in kvm_vm_ioctl_set_irqchip()
5186 kvm_set_ioapic(kvm, &chip->chip.ioapic); in kvm_vm_ioctl_set_irqchip()
5189 r = -EINVAL; in kvm_vm_ioctl_set_irqchip()
5198 struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state; in kvm_vm_ioctl_get_pit()
5200 BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels)); in kvm_vm_ioctl_get_pit()
5202 mutex_lock(&kps->lock); in kvm_vm_ioctl_get_pit()
5203 memcpy(ps, &kps->channels, sizeof(*ps)); in kvm_vm_ioctl_get_pit()
5204 mutex_unlock(&kps->lock); in kvm_vm_ioctl_get_pit()
5211 struct kvm_pit *pit = kvm->arch.vpit; in kvm_vm_ioctl_set_pit()
5213 mutex_lock(&pit->pit_state.lock); in kvm_vm_ioctl_set_pit()
5214 memcpy(&pit->pit_state.channels, ps, sizeof(*ps)); in kvm_vm_ioctl_set_pit()
5216 kvm_pit_load_count(pit, i, ps->channels[i].count, 0); in kvm_vm_ioctl_set_pit()
5217 mutex_unlock(&pit->pit_state.lock); in kvm_vm_ioctl_set_pit()
5223 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit2()
5224 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, in kvm_vm_ioctl_get_pit2()
5225 sizeof(ps->channels)); in kvm_vm_ioctl_get_pit2()
5226 ps->flags = kvm->arch.vpit->pit_state.flags; in kvm_vm_ioctl_get_pit2()
5227 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit2()
5228 memset(&ps->reserved, 0, sizeof(ps->reserved)); in kvm_vm_ioctl_get_pit2()
5237 struct kvm_pit *pit = kvm->arch.vpit; in kvm_vm_ioctl_set_pit2()
5239 mutex_lock(&pit->pit_state.lock); in kvm_vm_ioctl_set_pit2()
5240 prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; in kvm_vm_ioctl_set_pit2()
5241 cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY; in kvm_vm_ioctl_set_pit2()
5244 memcpy(&pit->pit_state.channels, &ps->channels, in kvm_vm_ioctl_set_pit2()
5245 sizeof(pit->pit_state.channels)); in kvm_vm_ioctl_set_pit2()
5246 pit->pit_state.flags = ps->flags; in kvm_vm_ioctl_set_pit2()
5248 kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count, in kvm_vm_ioctl_set_pit2()
5250 mutex_unlock(&pit->pit_state.lock); in kvm_vm_ioctl_set_pit2()
5257 struct kvm_pit *pit = kvm->arch.vpit; in kvm_vm_ioctl_reinject()
5259 /* pit->pit_state.lock was overloaded to prevent userspace from getting in kvm_vm_ioctl_reinject()
5263 mutex_lock(&pit->pit_state.lock); in kvm_vm_ioctl_reinject()
5264 kvm_pit_set_reinject(pit, control->pit_reinject); in kvm_vm_ioctl_reinject()
5265 mutex_unlock(&pit->pit_state.lock); in kvm_vm_ioctl_reinject()
5273 * Flush potentially hardware-cached dirty pages to dirty_bitmap. in kvm_arch_sync_dirty_log()
5283 return -ENXIO; in kvm_vm_ioctl_irq_line()
5285 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, in kvm_vm_ioctl_irq_line()
5286 irq_event->irq, irq_event->level, in kvm_vm_ioctl_irq_line()
5296 if (cap->flags) in kvm_vm_ioctl_enable_cap()
5297 return -EINVAL; in kvm_vm_ioctl_enable_cap()
5299 switch (cap->cap) { in kvm_vm_ioctl_enable_cap()
5301 kvm->arch.disabled_quirks = cap->args[0]; in kvm_vm_ioctl_enable_cap()
5305 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
5306 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
5307 if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS) in kvm_vm_ioctl_enable_cap()
5309 r = -EEXIST; in kvm_vm_ioctl_enable_cap()
5312 if (kvm->created_vcpus) in kvm_vm_ioctl_enable_cap()
5319 kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT; in kvm_vm_ioctl_enable_cap()
5320 kvm->arch.nr_reserved_ioapic_pins = cap->args[0]; in kvm_vm_ioctl_enable_cap()
5323 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
5327 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
5328 if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS) in kvm_vm_ioctl_enable_cap()
5331 if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS) in kvm_vm_ioctl_enable_cap()
5332 kvm->arch.x2apic_format = true; in kvm_vm_ioctl_enable_cap()
5333 if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK) in kvm_vm_ioctl_enable_cap()
5334 kvm->arch.x2apic_broadcast_quirk_disabled = true; in kvm_vm_ioctl_enable_cap()
5339 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
5340 if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS) in kvm_vm_ioctl_enable_cap()
5343 if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) && in kvm_vm_ioctl_enable_cap()
5345 kvm->arch.mwait_in_guest = true; in kvm_vm_ioctl_enable_cap()
5346 if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT) in kvm_vm_ioctl_enable_cap()
5347 kvm->arch.hlt_in_guest = true; in kvm_vm_ioctl_enable_cap()
5348 if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE) in kvm_vm_ioctl_enable_cap()
5349 kvm->arch.pause_in_guest = true; in kvm_vm_ioctl_enable_cap()
5350 if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE) in kvm_vm_ioctl_enable_cap()
5351 kvm->arch.cstate_in_guest = true; in kvm_vm_ioctl_enable_cap()
5355 kvm->arch.guest_can_read_msr_platform_info = cap->args[0]; in kvm_vm_ioctl_enable_cap()
5359 kvm->arch.exception_payload_enabled = cap->args[0]; in kvm_vm_ioctl_enable_cap()
5363 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
5364 if (cap->args[0] & ~(KVM_MSR_EXIT_REASON_INVAL | in kvm_vm_ioctl_enable_cap()
5368 kvm->arch.user_space_msr_mask = cap->args[0]; in kvm_vm_ioctl_enable_cap()
5372 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
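/*
 * Hedged sketch of the userspace side of kvm_vm_ioctl_enable_cap():
 * for example, letting the guest execute HLT without a VM-exit by
 * enabling KVM_CAP_X86_DISABLE_EXITS on the VM fd before any vCPU is
 * created.  vm_fd is an assumption (an open KVM VM file descriptor).
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int disable_hlt_exits(int vm_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_X86_DISABLE_EXITS;
	cap.args[0] = KVM_X86_DISABLE_EXITS_HLT;
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}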
5386 msr_filter->default_allow = default_allow; in kvm_alloc_msr_filter()
5397 for (i = 0; i < msr_filter->count; i++) in kvm_free_msr_filter()
5398 kfree(msr_filter->ranges[i].bitmap); in kvm_free_msr_filter()
5411 if (!user_range->nmsrs) in kvm_add_msr_filter()
5414 bitmap_size = BITS_TO_LONGS(user_range->nmsrs) * sizeof(long); in kvm_add_msr_filter()
5416 return -EINVAL; in kvm_add_msr_filter()
5418 bitmap = memdup_user((__user u8*)user_range->bitmap, bitmap_size); in kvm_add_msr_filter()
5423 .flags = user_range->flags, in kvm_add_msr_filter()
5424 .base = user_range->base, in kvm_add_msr_filter()
5425 .nmsrs = user_range->nmsrs, in kvm_add_msr_filter()
5430 r = -EINVAL; in kvm_add_msr_filter()
5435 r = -EINVAL; in kvm_add_msr_filter()
5440 msr_filter->ranges[msr_filter->count] = range; in kvm_add_msr_filter()
5441 msr_filter->count++; in kvm_add_msr_filter()
5458 if (filter->flags & ~KVM_MSR_FILTER_DEFAULT_DENY) in kvm_vm_ioctl_set_msr_filter()
5459 return -EINVAL; in kvm_vm_ioctl_set_msr_filter()
5461 for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) in kvm_vm_ioctl_set_msr_filter()
5462 empty &= !filter->ranges[i].nmsrs; in kvm_vm_ioctl_set_msr_filter()
5464 default_allow = !(filter->flags & KVM_MSR_FILTER_DEFAULT_DENY); in kvm_vm_ioctl_set_msr_filter()
5466 return -EINVAL; in kvm_vm_ioctl_set_msr_filter()
5470 return -ENOMEM; in kvm_vm_ioctl_set_msr_filter()
5472 for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) { in kvm_vm_ioctl_set_msr_filter()
5473 r = kvm_add_msr_filter(new_filter, &filter->ranges[i]); in kvm_vm_ioctl_set_msr_filter()
5480 mutex_lock(&kvm->lock); in kvm_vm_ioctl_set_msr_filter()
5482 /* The per-VM filter is protected by kvm->lock... */ in kvm_vm_ioctl_set_msr_filter()
5483 old_filter = srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1); in kvm_vm_ioctl_set_msr_filter()
5485 rcu_assign_pointer(kvm->arch.msr_filter, new_filter); in kvm_vm_ioctl_set_msr_filter()
5486 synchronize_srcu(&kvm->srcu); in kvm_vm_ioctl_set_msr_filter()
5491 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_set_msr_filter()
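/*
 * Hedged userspace sketch for kvm_vm_ioctl_set_msr_filter() above: the
 * VMM hands KVM a struct kvm_msr_filter via KVM_X86_SET_MSR_FILTER.
 * Here a single 64-MSR range starting at 0xc0000000 is filtered; in a
 * range bitmap a 1 bit allows the access and a 0 bit denies it.  The
 * fd name and the MSR base are assumptions chosen for illustration.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int deny_writes_to_range(int vm_fd)
{
	static __u8 bitmap[64 / 8];	/* all zero: deny every write */
	struct kvm_msr_filter filter;

	memset(&filter, 0, sizeof(filter));
	filter.flags = KVM_MSR_FILTER_DEFAULT_ALLOW;
	filter.ranges[0].flags = KVM_MSR_FILTER_WRITE;
	filter.ranges[0].base = 0xc0000000;
	filter.ranges[0].nmsrs = 64;
	filter.ranges[0].bitmap = bitmap;
	return ioctl(vm_fd, KVM_X86_SET_MSR_FILTER, &filter);
}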
5516 struct kvm *kvm = filp->private_data; in kvm_arch_vm_compat_ioctl()
5517 long r = -ENOTTY; in kvm_arch_vm_compat_ioctl()
5528 return -EFAULT; in kvm_arch_vm_compat_ioctl()
5536 .flags = cr->flags, in kvm_arch_vm_compat_ioctl()
5537 .nmsrs = cr->nmsrs, in kvm_arch_vm_compat_ioctl()
5538 .base = cr->base, in kvm_arch_vm_compat_ioctl()
5539 .bitmap = (__u8 *)(ulong)cr->bitmap, in kvm_arch_vm_compat_ioctl()
5555 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
5557 int r = -ENOTTY; in kvm_arch_vm_ioctl()
5559 * This union makes it completely explicit to gcc-3.x in kvm_arch_vm_ioctl()
5576 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
5577 r = -EINVAL; in kvm_arch_vm_ioctl()
5578 if (kvm->created_vcpus) in kvm_arch_vm_ioctl()
5580 r = -EFAULT; in kvm_arch_vm_ioctl()
5585 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
5595 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
5597 r = -EEXIST; in kvm_arch_vm_ioctl()
5601 r = -EINVAL; in kvm_arch_vm_ioctl()
5602 if (kvm->created_vcpus) in kvm_arch_vm_ioctl()
5621 /* Write kvm->irq_routing before enabling irqchip_in_kernel. */ in kvm_arch_vm_ioctl()
5623 kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL; in kvm_arch_vm_ioctl()
5625 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
5632 r = -EFAULT; in kvm_arch_vm_ioctl()
5637 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
5638 r = -EEXIST; in kvm_arch_vm_ioctl()
5639 if (kvm->arch.vpit) in kvm_arch_vm_ioctl()
5641 r = -ENOMEM; in kvm_arch_vm_ioctl()
5642 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); in kvm_arch_vm_ioctl()
5643 if (kvm->arch.vpit) in kvm_arch_vm_ioctl()
5646 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
5658 r = -ENXIO; in kvm_arch_vm_ioctl()
5664 r = -EFAULT; in kvm_arch_vm_ioctl()
5682 r = -ENXIO; in kvm_arch_vm_ioctl()
5691 r = -EFAULT; in kvm_arch_vm_ioctl()
5694 r = -ENXIO; in kvm_arch_vm_ioctl()
5695 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
5700 r = -EFAULT; in kvm_arch_vm_ioctl()
5707 r = -EFAULT; in kvm_arch_vm_ioctl()
5710 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
5711 r = -ENXIO; in kvm_arch_vm_ioctl()
5712 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
5716 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
5720 r = -ENXIO; in kvm_arch_vm_ioctl()
5721 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
5726 r = -EFAULT; in kvm_arch_vm_ioctl()
5733 r = -EFAULT; in kvm_arch_vm_ioctl()
5736 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
5737 r = -ENXIO; in kvm_arch_vm_ioctl()
5738 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
5742 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
5747 r = -EFAULT; in kvm_arch_vm_ioctl()
5750 r = -ENXIO; in kvm_arch_vm_ioctl()
5751 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
5758 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
5759 if (kvm->created_vcpus) in kvm_arch_vm_ioctl()
5760 r = -EBUSY; in kvm_arch_vm_ioctl()
5762 kvm->arch.bsp_vcpu_id = arg; in kvm_arch_vm_ioctl()
5763 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
5767 r = -EFAULT; in kvm_arch_vm_ioctl()
5770 r = -EINVAL; in kvm_arch_vm_ioctl()
5773 memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc)); in kvm_arch_vm_ioctl()
5781 r = -EFAULT; in kvm_arch_vm_ioctl()
5785 r = -EINVAL; in kvm_arch_vm_ioctl()
5797 kvm->arch.kvmclock_offset += user_ns.clock - now_ns; in kvm_arch_vm_ioctl()
5807 user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0; in kvm_arch_vm_ioctl()
5810 r = -EFAULT; in kvm_arch_vm_ioctl()
5817 r = -ENOTTY; in kvm_arch_vm_ioctl()
5825 r = -EFAULT; in kvm_arch_vm_ioctl()
5829 r = -ENOTTY; in kvm_arch_vm_ioctl()
5837 r = -EFAULT; in kvm_arch_vm_ioctl()
5841 r = -ENOTTY; in kvm_arch_vm_ioctl()
5849 r = -EFAULT; in kvm_arch_vm_ioctl()
5863 return -EFAULT; in kvm_arch_vm_ioctl()
5869 r = -ENOTTY; in kvm_arch_vm_ioctl()
5930 msrs_to_save_all[i] - MSR_IA32_RTIT_ADDR0_A >= in kvm_init_msr_list()
5935 if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >= in kvm_init_msr_list()
5940 if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >= in kvm_init_msr_list()
5978 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) in vcpu_mmio_write()
5983 len -= n; in vcpu_mmio_write()
5998 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, in vcpu_mmio_read()
6005 len -= n; in vcpu_mmio_read()
6031 /* NPT walks are always user-walks */ in translate_nested_gpa()
6033 t_gpa = vcpu->arch.mmu->gva_to_gpa(vcpu, gpa, access, exception); in translate_nested_gpa()
6042 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_read()
6050 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_fetch()
6058 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_write()
6065 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); in kvm_mmu_gva_to_gpa_system()
6076 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, in kvm_read_guest_virt_helper()
6078 unsigned offset = addr & (PAGE_SIZE-1); in kvm_read_guest_virt_helper()
6079 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); in kvm_read_guest_virt_helper()
6091 bytes -= toread; in kvm_read_guest_virt_helper()
6110 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK, in kvm_fetch_guest_virt()
6115 offset = addr & (PAGE_SIZE-1); in kvm_fetch_guest_virt()
6117 bytes = (unsigned)PAGE_SIZE - offset; in kvm_fetch_guest_virt()
6174 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, in kvm_write_guest_virt_helper()
6177 unsigned offset = addr & (PAGE_SIZE-1); in kvm_write_guest_virt_helper()
6178 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); in kvm_write_guest_virt_helper()
6189 bytes -= towrite; in kvm_write_guest_virt_helper()
6215 vcpu->arch.l1tf_flush_l1d = true; in kvm_write_guest_virt_system()
6272 && !permission_fault(vcpu, vcpu->arch.walk_mmu, in vcpu_mmio_gva_to_gpa()
6273 vcpu->arch.mmio_access, 0, access)) { in vcpu_mmio_gva_to_gpa()
6274 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | in vcpu_mmio_gva_to_gpa()
6275 (gva & (PAGE_SIZE - 1)); in vcpu_mmio_gva_to_gpa()
6280 *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in vcpu_mmio_gva_to_gpa()
6283 return -1; in vcpu_mmio_gva_to_gpa()
6314 if (vcpu->mmio_read_completed) { in read_prepare()
6316 vcpu->mmio_fragments[0].gpa, val); in read_prepare()
6317 vcpu->mmio_read_completed = 0; in read_prepare()
6352 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; in write_exit_mmio()
6354 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); in write_exit_mmio()
6380 bool write = ops->write; in emulator_read_write_onepage()
6382 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in emulator_read_write_onepage()
6391 if (ctxt->gpa_available && emulator_can_use_gpa(ctxt) && in emulator_read_write_onepage()
6392 (addr & ~PAGE_MASK) == (ctxt->gpa_val & ~PAGE_MASK)) { in emulator_read_write_onepage()
6393 gpa = ctxt->gpa_val; in emulator_read_write_onepage()
6401 if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes)) in emulator_read_write_onepage()
6407 handled = ops->read_write_mmio(vcpu, gpa, bytes, val); in emulator_read_write_onepage()
6412 bytes -= handled; in emulator_read_write_onepage()
6415 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); in emulator_read_write_onepage()
6416 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; in emulator_read_write_onepage()
6417 frag->gpa = gpa; in emulator_read_write_onepage()
6418 frag->data = val; in emulator_read_write_onepage()
6419 frag->len = bytes; in emulator_read_write_onepage()
6433 if (ops->read_write_prepare && in emulator_read_write()
6434 ops->read_write_prepare(vcpu, val, bytes)) in emulator_read_write()
6437 vcpu->mmio_nr_fragments = 0; in emulator_read_write()
6440 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { in emulator_read_write()
6443 now = -addr & ~PAGE_MASK; in emulator_read_write()
6450 if (ctxt->mode != X86EMUL_MODE_PROT64) in emulator_read_write()
6453 bytes -= now; in emulator_read_write()
6461 if (!vcpu->mmio_nr_fragments) in emulator_read_write()
6464 gpa = vcpu->mmio_fragments[0].gpa; in emulator_read_write()
6466 vcpu->mmio_needed = 1; in emulator_read_write()
6467 vcpu->mmio_cur_fragment = 0; in emulator_read_write()
6469 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); in emulator_read_write()
6470 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; in emulator_read_write()
6471 vcpu->run->exit_reason = KVM_EXIT_MMIO; in emulator_read_write()
6472 vcpu->run->mmio.phys_addr = gpa; in emulator_read_write()
6474 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); in emulator_read_write()
6522 if (bytes > 8 || (bytes & (bytes - 1))) in emulator_cmpxchg_emulated()
6536 page_line_mask = ~(cache_line_size() - 1); in emulator_cmpxchg_emulated()
6540 if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask)) in emulator_cmpxchg_emulated()
6584 for (i = 0; i < vcpu->arch.pio.count; i++) { in kernel_pio()
6585 if (vcpu->arch.pio.in) in kernel_pio()
6586 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port, in kernel_pio()
6587 vcpu->arch.pio.size, pd); in kernel_pio()
6590 vcpu->arch.pio.port, vcpu->arch.pio.size, in kernel_pio()
6594 pd += vcpu->arch.pio.size; in kernel_pio()
6603 vcpu->arch.pio.port = port; in emulator_pio_in_out()
6604 vcpu->arch.pio.in = in; in emulator_pio_in_out()
6605 vcpu->arch.pio.count = count; in emulator_pio_in_out()
6606 vcpu->arch.pio.size = size; in emulator_pio_in_out()
6608 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { in emulator_pio_in_out()
6609 vcpu->arch.pio.count = 0; in emulator_pio_in_out()
6613 vcpu->run->exit_reason = KVM_EXIT_IO; in emulator_pio_in_out()
6614 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; in emulator_pio_in_out()
6615 vcpu->run->io.size = size; in emulator_pio_in_out()
6616 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; in emulator_pio_in_out()
6617 vcpu->run->io.count = count; in emulator_pio_in_out()
6618 vcpu->run->io.port = port; in emulator_pio_in_out()
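/*
 * Hedged sketch of the userspace half of emulator_pio_in_out(): when
 * KVM_RUN returns with KVM_EXIT_IO, the payload lives inside the
 * mmap()ed kvm_run page at run->io.data_offset.  handle_port_io()
 * stands in for a VMM-local device model and is an assumption.
 */
#include <stdint.h>
#include <linux/kvm.h>

extern void handle_port_io(uint16_t port, uint8_t size, int is_in,
			   void *data);

static void handle_kvm_exit_io(struct kvm_run *run)
{
	uint8_t *data = (uint8_t *)run + run->io.data_offset;
	uint32_t i;

	/* one exit may carry 'count' back-to-back transfers (INS/OUTS) */
	for (i = 0; i < run->io.count; i++, data += run->io.size)
		handle_port_io(run->io.port, run->io.size,
			       run->io.direction == KVM_EXIT_IO_IN, data);
}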
6628 if (vcpu->arch.pio.count) in emulator_pio_in()
6631 memset(vcpu->arch.pio_data, 0, size * count); in emulator_pio_in()
6636 memcpy(val, vcpu->arch.pio_data, size * count); in emulator_pio_in()
6637 trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data); in emulator_pio_in()
6638 vcpu->arch.pio.count = 0; in emulator_pio_in()
6657 memcpy(vcpu->arch.pio_data, val, size * count); in emulator_pio_out()
6658 trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data); in emulator_pio_out()
6687 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
6688 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, in kvm_emulate_wbinvd_noskip()
6691 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
6726 return (curr_cr & ~((1ULL << 32) - 1)) | new_val; in mk_cr_64()
6739 value = vcpu->arch.cr2; in emulator_get_cr()
6768 vcpu->arch.cr2 = val; in emulator_set_cr()
6781 res = -1; in emulator_set_cr()
6842 desc->type = var.type; in emulator_get_segment()
6843 desc->s = var.s; in emulator_get_segment()
6844 desc->dpl = var.dpl; in emulator_get_segment()
6845 desc->p = var.present; in emulator_get_segment()
6846 desc->avl = var.avl; in emulator_get_segment()
6847 desc->l = var.l; in emulator_get_segment()
6848 desc->d = var.db; in emulator_get_segment()
6849 desc->g = var.g; in emulator_get_segment()
6867 if (desc->g) in emulator_set_segment()
6869 var.type = desc->type; in emulator_set_segment()
6870 var.dpl = desc->dpl; in emulator_set_segment()
6871 var.db = desc->d; in emulator_set_segment()
6872 var.s = desc->s; in emulator_set_segment()
6873 var.l = desc->l; in emulator_set_segment()
6874 var.g = desc->g; in emulator_set_segment()
6875 var.avl = desc->avl; in emulator_set_segment()
6876 var.present = desc->p; in emulator_set_segment()
6920 return vcpu->arch.smbase; in emulator_get_smbase()
6927 vcpu->arch.smbase = smbase; in emulator_set_smbase()
6944 emul_to_vcpu(ctxt)->arch.halt_request = 1; in emulator_halt()
6952 &ctxt->exception); in emulator_intercept()
6956 u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, in emulator_get_cpuid() argument
6959 return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, exact_only); in emulator_get_cpuid()
6999 return emul_to_vcpu(ctxt)->arch.hflags; in emulator_get_hflags()
7006 vcpu->arch.hflags = emul_flags; in emulator_set_hflags()
7095 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in inject_emulated_exception()
7096 if (ctxt->exception.vector == PF_VECTOR) in inject_emulated_exception()
7097 return kvm_inject_emulated_page_fault(vcpu, &ctxt->exception); in inject_emulated_exception()
7099 if (ctxt->exception.error_code_valid) in inject_emulated_exception()
7100 kvm_queue_exception_e(vcpu, ctxt->exception.vector, in inject_emulated_exception()
7101 ctxt->exception.error_code); in inject_emulated_exception()
7103 kvm_queue_exception(vcpu, ctxt->exception.vector); in inject_emulated_exception()
7117 ctxt->vcpu = vcpu; in alloc_emulate_ctxt()
7118 ctxt->ops = &emulate_ops; in alloc_emulate_ctxt()
7119 vcpu->arch.emulate_ctxt = ctxt; in alloc_emulate_ctxt()
7126 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in init_emulate_ctxt()
7131 ctxt->gpa_available = false; in init_emulate_ctxt()
7132 ctxt->eflags = kvm_get_rflags(vcpu); in init_emulate_ctxt()
7133 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; in init_emulate_ctxt()
7135 ctxt->eip = kvm_rip_read(vcpu); in init_emulate_ctxt()
7136 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : in init_emulate_ctxt()
7137 (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : in init_emulate_ctxt()
7145 ctxt->interruptibility = 0; in init_emulate_ctxt()
7146 ctxt->have_exception = false; in init_emulate_ctxt()
7147 ctxt->exception.vector = -1; in init_emulate_ctxt()
7148 ctxt->perm_ok = false; in init_emulate_ctxt()
7151 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in init_emulate_ctxt()
7156 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in kvm_inject_realmode_interrupt()
7161 ctxt->op_bytes = 2; in kvm_inject_realmode_interrupt()
7162 ctxt->ad_bytes = 2; in kvm_inject_realmode_interrupt()
7163 ctxt->_eip = ctxt->eip + inc_eip; in kvm_inject_realmode_interrupt()
7169 ctxt->eip = ctxt->_eip; in kvm_inject_realmode_interrupt()
7170 kvm_rip_write(vcpu, ctxt->eip); in kvm_inject_realmode_interrupt()
7171 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_inject_realmode_interrupt()
7178 ++vcpu->stat.insn_emulation_fail; in handle_emulation_failure()
7187 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in handle_emulation_failure()
7188 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in handle_emulation_failure()
7189 vcpu->run->internal.ndata = 0; in handle_emulation_failure()
7196 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in handle_emulation_failure()
7197 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in handle_emulation_failure()
7198 vcpu->run->internal.ndata = 0; in handle_emulation_failure()
7219 if (!vcpu->arch.mmu->direct_map) { in reexecute_instruction()
7237 * retry instruction -> write #PF -> emulation fail -> retry in reexecute_instruction()
7238 * instruction -> ... in reexecute_instruction()
7240 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
7251 /* The instructions are well-emulated on direct mmu. */ in reexecute_instruction()
7252 if (vcpu->arch.mmu->direct_map) { in reexecute_instruction()
7255 spin_lock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
7256 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; in reexecute_instruction()
7257 spin_unlock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
7260 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
7267 * and it failed try to unshadow page and re-enter the in reexecute_instruction()
7270 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
7286 last_retry_eip = vcpu->arch.last_retry_eip; in retry_instruction()
7287 last_retry_addr = vcpu->arch.last_retry_addr; in retry_instruction()
7290 * If the emulation is caused by #PF and it is non-page_table in retry_instruction()
7291 * writing instruction, it means the VM-EXIT is caused by shadow in retry_instruction()
7295 * Note: if the guest uses a non-page-table modifying instruction in retry_instruction()
7302 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; in retry_instruction()
7314 if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa) in retry_instruction()
7317 vcpu->arch.last_retry_eip = ctxt->eip; in retry_instruction()
7318 vcpu->arch.last_retry_addr = cr2_or_gpa; in retry_instruction()
7320 if (!vcpu->arch.mmu->direct_map) in retry_instruction()
7323 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in retry_instruction()
7333 if (!(vcpu->arch.hflags & HF_SMM_MASK)) { in kvm_smm_changed()
7335 trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false); in kvm_smm_changed()
7361 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_do_singlestep()
7363 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { in kvm_vcpu_do_singlestep()
7364 kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM; in kvm_vcpu_do_singlestep()
7365 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu); in kvm_vcpu_do_singlestep()
7366 kvm_run->debug.arch.exception = DB_VECTOR; in kvm_vcpu_do_singlestep()
7367 kvm_run->exit_reason = KVM_EXIT_DEBUG; in kvm_vcpu_do_singlestep()
7399 if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && in kvm_vcpu_check_code_breakpoint()
7400 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { in kvm_vcpu_check_code_breakpoint()
7401 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_check_code_breakpoint()
7404 vcpu->arch.guest_debug_dr7, in kvm_vcpu_check_code_breakpoint()
7405 vcpu->arch.eff_db); in kvm_vcpu_check_code_breakpoint()
7408 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM; in kvm_vcpu_check_code_breakpoint()
7409 kvm_run->debug.arch.pc = eip; in kvm_vcpu_check_code_breakpoint()
7410 kvm_run->debug.arch.exception = DB_VECTOR; in kvm_vcpu_check_code_breakpoint()
7411 kvm_run->exit_reason = KVM_EXIT_DEBUG; in kvm_vcpu_check_code_breakpoint()
7417 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && in kvm_vcpu_check_code_breakpoint()
7421 vcpu->arch.dr7, in kvm_vcpu_check_code_breakpoint()
7422 vcpu->arch.db); in kvm_vcpu_check_code_breakpoint()
7436 switch (ctxt->opcode_len) { in is_vmware_backdoor_opcode()
7438 switch (ctxt->b) { in is_vmware_backdoor_opcode()
7455 switch (ctxt->b) { in is_vmware_backdoor_opcode()
7468 * (and wrong) when emulating on an intercepted fault-like exception[*], as
7478 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in x86_decode_emulated_instruction()
7483 ctxt->ud = emulation_type & EMULTYPE_TRAP_UD; in x86_decode_emulated_instruction()
7488 ++vcpu->stat.insn_emulation; in x86_decode_emulated_instruction()
7498 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in x86_emulate_instruction()
7505 vcpu->arch.l1tf_flush_l1d = true; in x86_emulate_instruction()
7511 write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; in x86_emulate_instruction()
7512 vcpu->arch.write_fault_to_shadow_pgtable = false; in x86_emulate_instruction()
7519 * are fault-like and are higher priority than any faults on in x86_emulate_instruction()
7538 if (ctxt->have_exception) { in x86_emulate_instruction()
7540 * #UD should result in just EMULATION_FAILED, and trap-like in x86_emulate_instruction()
7543 WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR || in x86_emulate_instruction()
7544 exception_type(ctxt->exception.vector) == EXCPT_TRAP); in x86_emulate_instruction()
7561 * updating interruptibility state and injecting single-step #DBs. in x86_emulate_instruction()
7564 kvm_rip_write(vcpu, ctxt->_eip); in x86_emulate_instruction()
7565 if (ctxt->eflags & X86_EFLAGS_RF) in x86_emulate_instruction()
7566 kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); in x86_emulate_instruction()
7575 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { in x86_emulate_instruction()
7576 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in x86_emulate_instruction()
7583 ctxt->exception.address = cr2_or_gpa; in x86_emulate_instruction()
7586 if (vcpu->arch.mmu->direct_map) { in x86_emulate_instruction()
7587 ctxt->gpa_available = true; in x86_emulate_instruction()
7588 ctxt->gpa_val = cr2_or_gpa; in x86_emulate_instruction()
7592 ctxt->exception.address = 0; in x86_emulate_instruction()
7608 if (ctxt->have_exception) { in x86_emulate_instruction()
7612 } else if (vcpu->arch.pio.count) { in x86_emulate_instruction()
7613 if (!vcpu->arch.pio.in) { in x86_emulate_instruction()
7614 /* FIXME: return into emulator if single-stepping. */ in x86_emulate_instruction()
7615 vcpu->arch.pio.count = 0; in x86_emulate_instruction()
7618 vcpu->arch.complete_userspace_io = complete_emulated_pio; in x86_emulate_instruction()
7621 } else if (vcpu->mmio_needed) { in x86_emulate_instruction()
7622 ++vcpu->stat.mmio_exits; in x86_emulate_instruction()
7624 if (!vcpu->mmio_is_write) in x86_emulate_instruction()
7627 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in x86_emulate_instruction()
7635 toggle_interruptibility(vcpu, ctxt->interruptibility); in x86_emulate_instruction()
7636 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in x86_emulate_instruction()
7639 * Note, EXCPT_DB is assumed to be fault-like as the emulator in x86_emulate_instruction()
7641 * of which are fault-like. in x86_emulate_instruction()
7643 if (!ctxt->have_exception || in x86_emulate_instruction()
7644 exception_type(ctxt->exception.vector) == EXCPT_TRAP) { in x86_emulate_instruction()
7645 kvm_rip_write(vcpu, ctxt->eip); in x86_emulate_instruction()
7646 if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) in x86_emulate_instruction()
7650 __kvm_set_rflags(vcpu, ctxt->eflags); in x86_emulate_instruction()
7659 if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF)) in x86_emulate_instruction()
7662 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; in x86_emulate_instruction()
7682 vcpu->arch.pio.count = 0; in complete_fast_pio_out_port_0x7e()
7688 vcpu->arch.pio.count = 0; in complete_fast_pio_out()
7690 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) in complete_fast_pio_out()
7710 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) { in kvm_fast_pio_out()
7711 vcpu->arch.complete_userspace_io = in kvm_fast_pio_out()
7715 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); in kvm_fast_pio_out()
7716 vcpu->arch.complete_userspace_io = complete_fast_pio_out; in kvm_fast_pio_out()
7726 BUG_ON(vcpu->arch.pio.count != 1); in complete_fast_pio_in()
7728 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) { in complete_fast_pio_in()
7729 vcpu->arch.pio.count = 0; in complete_fast_pio_in()
7734 val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0; in complete_fast_pio_in()
7737 * Since vcpu->arch.pio.count == 1 let emulator_pio_in perform in complete_fast_pio_in()
7740 emulator_pio_in(vcpu, vcpu->arch.pio.size, vcpu->arch.pio.port, &val, 1); in complete_fast_pio_in()
7761 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); in kvm_fast_pio_in()
7762 vcpu->arch.complete_userspace_io = complete_fast_pio_in; in kvm_fast_pio_in()
7791 khz = freq->new; in tsc_khz_changed()
7812 /* TSC frequency always matches when on Hyper-V */ in kvm_hyperv_tsc_notifier()
7818 struct kvm_arch *ka = &kvm->arch; in kvm_hyperv_tsc_notifier()
7820 spin_lock(&ka->pvclock_gtod_sync_lock); in kvm_hyperv_tsc_notifier()
7830 spin_unlock(&ka->pvclock_gtod_sync_lock); in kvm_hyperv_tsc_notifier()
7886 if (vcpu->cpu != cpu) in __kvmclock_cpufreq_notifier()
7889 if (vcpu->cpu != raw_smp_processor_id()) in __kvmclock_cpufreq_notifier()
7895 if (freq->old < freq->new && send_ipi) { in __kvmclock_cpufreq_notifier()
7918 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) in kvmclock_cpufreq_notifier()
7920 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) in kvmclock_cpufreq_notifier()
7923 for_each_cpu(cpu, freq->policy->cpus) in kvmclock_cpufreq_notifier()
7951 if (policy->cpuinfo.max_freq) in kvm_timer_init()
7952 max_tsc_khz = policy->cpuinfo.max_freq; in kvm_timer_init()
7999 (unsigned long *)&vcpu->arch.pmu.global_status); in kvm_handle_intel_pt_intr()
8055 if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) && in pvclock_gtod_notify()
8073 r = -EEXIST; in kvm_arch_init()
8077 if (!ops->cpu_has_kvm_support()) { in kvm_arch_init()
8079 r = -EOPNOTSUPP; in kvm_arch_init()
8082 if (ops->disabled_by_bios()) { in kvm_arch_init()
8084 r = -EOPNOTSUPP; in kvm_arch_init()
8095 r = -EOPNOTSUPP; in kvm_arch_init()
8099 r = -ENOMEM; in kvm_arch_init()
8129 if (ops->intel_pt_intr_in_guest && ops->intel_pt_intr_in_guest()) in kvm_arch_init()
8139 if (pi_inject_timer == -1) in kvm_arch_init()
8188 ++vcpu->stat.halt_exits; in kvm_vcpu_halt()
8190 vcpu->arch.mp_state = KVM_MP_STATE_HALTED; in kvm_vcpu_halt()
8193 vcpu->run->exit_reason = KVM_EXIT_HLT; in kvm_vcpu_halt()
8203 * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered in kvm_emulate_halt()
8220 return -KVM_EOPNOTSUPP; in kvm_pv_clock_pairing()
8223 return -KVM_EOPNOTSUPP; in kvm_pv_clock_pairing()
8232 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing, in kvm_pv_clock_pairing()
8234 ret = -KVM_EFAULT; in kvm_pv_clock_pairing()
8243 * @apicid - apicid of vcpu to be kicked.
8263 return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0); in kvm_apicv_activated()
8271 &kvm->arch.apicv_inhibit_reasons); in kvm_apicv_init()
8274 &kvm->arch.apicv_inhibit_reasons); in kvm_apicv_init()
8284 map = rcu_dereference(kvm->arch.apic_map); in kvm_sched_yield()
8286 if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id]) in kvm_sched_yield()
8287 target = map->phys_map[dest_id]->vcpu; in kvm_sched_yield()
8291 if (target && READ_ONCE(target->ready)) in kvm_sched_yield()
8300 if (kvm_hv_hypercall_enabled(vcpu->kvm)) in kvm_emulate_hypercall()
8321 ret = -KVM_EPERM; in kvm_emulate_hypercall()
8325 ret = -KVM_ENOSYS; in kvm_emulate_hypercall()
8335 kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1); in kvm_emulate_hypercall()
8336 kvm_sched_yield(vcpu->kvm, a1); in kvm_emulate_hypercall()
8348 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); in kvm_emulate_hypercall()
8354 kvm_sched_yield(vcpu->kvm, a0); in kvm_emulate_hypercall()
8358 ret = -KVM_ENOSYS; in kvm_emulate_hypercall()
8366 ++vcpu->stat.hypercalls; in kvm_emulate_hypercall()
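/*
 * Hedged guest-side sketch for kvm_emulate_hypercall() above: the
 * hypercall number goes in RAX and up to four arguments in
 * RBX/RCX/RDX/RSI before VMCALL (VMMCALL on AMD) traps into the
 * handler; the return value comes back in RAX.  The wrapper name is
 * illustrative, mirroring the guest's kvm_hypercall2().
 */
static inline long kvm_hypercall2_sketch(unsigned int nr,
					 unsigned long a0, unsigned long a1)
{
	long ret;

	asm volatile("vmcall"
		     : "=a"(ret)
		     : "a"(nr), "b"(a0), "c"(a1)
		     : "memory");
	return ret;
}

/* e.g. wake a halted vCPU: kvm_hypercall2_sketch(KVM_HC_KICK_CPU, 0, apicid) */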
8380 &ctxt->exception); in emulator_fix_hypercall()
8385 return vcpu->run->request_interrupt_window && in dm_request_for_irq_injection()
8386 likely(!pic_in_kernel(vcpu->kvm)); in dm_request_for_irq_injection()
8391 struct kvm_run *kvm_run = vcpu->run; in post_kvm_run_save()
8393 kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; in post_kvm_run_save()
8394 kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0; in post_kvm_run_save()
8395 kvm_run->cr8 = kvm_get_cr8(vcpu); in post_kvm_run_save()
8396 kvm_run->apic_base = kvm_get_apic_base(vcpu); in post_kvm_run_save()
8397 kvm_run->ready_for_interrupt_injection = in post_kvm_run_save()
8398 pic_in_kernel(vcpu->kvm) || in post_kvm_run_save()
8412 if (vcpu->arch.apicv_active) in update_cr8_intercept()
8415 if (!vcpu->arch.apic->vapic_addr) in update_cr8_intercept()
8418 max_irr = -1; in update_cr8_intercept()
8420 if (max_irr != -1) in update_cr8_intercept()
8430 trace_kvm_inj_exception(vcpu->arch.exception.nr, in kvm_inject_exception()
8431 vcpu->arch.exception.has_error_code, in kvm_inject_exception()
8432 vcpu->arch.exception.error_code, in kvm_inject_exception()
8433 vcpu->arch.exception.injected); in kvm_inject_exception()
8435 if (vcpu->arch.exception.error_code && !is_protmode(vcpu)) in kvm_inject_exception()
8436 vcpu->arch.exception.error_code = false; in kvm_inject_exception()
8447 if (vcpu->arch.exception.injected) { in inject_pending_event()
8455 * Trap-like exceptions, e.g. #DB, have higher priority than in inject_pending_event()
8458 * Fault-like exceptions, e.g. #GP and #PF, are the lowest in inject_pending_event()
8460 * execution, i.e. a pending fault-like exception means the in inject_pending_event()
8465 else if (!vcpu->arch.exception.pending) { in inject_pending_event()
8466 if (vcpu->arch.nmi_injected) { in inject_pending_event()
8469 } else if (vcpu->arch.interrupt.injected) { in inject_pending_event()
8475 WARN_ON_ONCE(vcpu->arch.exception.injected && in inject_pending_event()
8476 vcpu->arch.exception.pending); in inject_pending_event()
8480 * in order for caller to determine if it should require immediate-exit in inject_pending_event()
8485 r = kvm_x86_ops.nested_ops->check_events(vcpu); in inject_pending_event()
8491 if (vcpu->arch.exception.pending) { in inject_pending_event()
8493 * Fault-class exceptions, except #DBs, set RF=1 in the RFLAGS in inject_pending_event()
8494 * value pushed on the stack. Trap-like exception and all #DBs in inject_pending_event()
8495 * leave RF as-is (KVM follows Intel's behavior in this regard; in inject_pending_event()
8500 * fault-like. They do _not_ set RF, a la code breakpoints. in inject_pending_event()
8502 if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT) in inject_pending_event()
8506 if (vcpu->arch.exception.nr == DB_VECTOR) { in inject_pending_event()
8508 if (vcpu->arch.dr7 & DR7_GD) { in inject_pending_event()
8509 vcpu->arch.dr7 &= ~DR7_GD; in inject_pending_event()
8516 vcpu->arch.exception.pending = false; in inject_pending_event()
8517 vcpu->arch.exception.injected = true; in inject_pending_event()
8524 * due to architectural conditions (e.g. IF=0) a window-open exit in inject_pending_event()
8525 * will re-request KVM_REQ_EVENT. Sometimes however an event is pending in inject_pending_event()
8531 * The kvm_x86_ops hooks communicate this by returning -EBUSY. in inject_pending_event()
8533 if (vcpu->arch.smi_pending) { in inject_pending_event()
8534 r = can_inject ? kvm_x86_ops.smi_allowed(vcpu, true) : -EBUSY; in inject_pending_event()
8538 vcpu->arch.smi_pending = false; in inject_pending_event()
8539 ++vcpu->arch.smi_count; in inject_pending_event()
8546 if (vcpu->arch.nmi_pending) { in inject_pending_event()
8547 r = can_inject ? kvm_x86_ops.nmi_allowed(vcpu, true) : -EBUSY; in inject_pending_event()
8551 --vcpu->arch.nmi_pending; in inject_pending_event()
8552 vcpu->arch.nmi_injected = true; in inject_pending_event()
8557 if (vcpu->arch.nmi_pending) in inject_pending_event()
8562 r = can_inject ? kvm_x86_ops.interrupt_allowed(vcpu, true) : -EBUSY; in inject_pending_event()
8575 kvm_x86_ops.nested_ops->hv_timer_pending && in inject_pending_event()
8576 kvm_x86_ops.nested_ops->hv_timer_pending(vcpu)) in inject_pending_event()
8579 WARN_ON(vcpu->arch.exception.pending); in inject_pending_event()
8596 if (kvm_x86_ops.get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) in process_nmi()
8599 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); in process_nmi()
8600 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); in process_nmi()
8607 flags |= seg->g << 23; in enter_smm_get_segment_flags()
8608 flags |= seg->db << 22; in enter_smm_get_segment_flags()
8609 flags |= seg->l << 21; in enter_smm_get_segment_flags()
8610 flags |= seg->avl << 20; in enter_smm_get_segment_flags()
8611 flags |= seg->present << 15; in enter_smm_get_segment_flags()
8612 flags |= seg->dpl << 13; in enter_smm_get_segment_flags()
8613 flags |= seg->s << 12; in enter_smm_get_segment_flags()
8614 flags |= seg->type << 8; in enter_smm_get_segment_flags()
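/*
 * For reference, a sketch of the inverse unpacking of the SMRAM
 * segment flag word built above (the real RSM path lives in the
 * instruction emulator, not in this file):
 */
static void smram_flags_to_seg_sketch(u32 flags, struct kvm_segment *seg)
{
	seg->g       = (flags >> 23) & 1;
	seg->db      = (flags >> 22) & 1;
	seg->l       = (flags >> 21) & 1;
	seg->avl     = (flags >> 20) & 1;
	seg->present = (flags >> 15) & 1;
	seg->dpl     = (flags >> 13) & 3;
	seg->s       = (flags >> 12) & 1;
	seg->type    = (flags >> 8) & 0xf;
}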
8629 offset = 0x7f2c + (n - 3) * 12; in enter_smm_save_seg_32()
8701 put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase); in enter_smm_save_state_32()
8713 put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read(vcpu, i)); in enter_smm_save_state_64()
8727 put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase); in enter_smm_save_state_64()
8732 put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer); in enter_smm_save_state_64()
8766 trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true); in enter_smm()
8776 * Give pre_enter_smm() a chance to make ISA-specific changes to the in enter_smm()
8778 * the SMM state-save area. in enter_smm()
8782 vcpu->arch.hflags |= HF_SMM_MASK; in enter_smm()
8783 kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf)); in enter_smm()
8786 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in enter_smm()
8793 cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG); in enter_smm()
8795 vcpu->arch.cr0 = cr0; in enter_smm()
8805 cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; in enter_smm()
8806 cs.base = vcpu->arch.smbase; in enter_smm()
8841 vcpu->arch.smi_pending = true; in process_smi()
8868 vcpu->arch.apicv_active = kvm_apicv_activated(vcpu->kvm); in kvm_vcpu_update_apicv()
8877 * In particular, kvm_request_apicv_update() expects kvm->srcu not to be
8879 * synchronize_srcu(&kvm->srcu).
8890 old = READ_ONCE(kvm->arch.apicv_inhibit_reasons); in kvm_request_apicv_update()
8899 old = cmpxchg(&kvm->arch.apicv_inhibit_reasons, expected, new); in kvm_request_apicv_update()
8927 bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256); in vcpu_scan_ioapic()
8929 if (irqchip_split(vcpu->kvm)) in vcpu_scan_ioapic()
8930 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); in vcpu_scan_ioapic()
8932 if (vcpu->arch.apicv_active) in vcpu_scan_ioapic()
8934 if (ioapic_in_kernel(vcpu->kvm)) in vcpu_scan_ioapic()
8935 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); in vcpu_scan_ioapic()
8939 vcpu->arch.load_eoi_exitmap_pending = true; in vcpu_scan_ioapic()
8948 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) in vcpu_load_eoi_exitmap()
8951 bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors, in vcpu_load_eoi_exitmap()
8952 vcpu_to_synic(vcpu)->vec_bitmap, 256); in vcpu_load_eoi_exitmap()
8989 smp_send_reschedule(vcpu->cpu); in __kvm_request_immediate_exit()
9010 if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) { in vcpu_enter_guest()
9020 kvm_gen_update_masterclock(vcpu->kvm); in vcpu_enter_guest()
9044 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; in vcpu_enter_guest()
9049 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; in vcpu_enter_guest()
9050 vcpu->mmio_needed = 0; in vcpu_enter_guest()
9056 vcpu->arch.apf.halted = true; in vcpu_enter_guest()
9071 BUG_ON(vcpu->arch.pending_ioapic_eoi > 255); in vcpu_enter_guest()
9072 if (test_bit(vcpu->arch.pending_ioapic_eoi, in vcpu_enter_guest()
9073 vcpu->arch.ioapic_handled_vectors)) { in vcpu_enter_guest()
9074 vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI; in vcpu_enter_guest()
9075 vcpu->run->eoi.vector = in vcpu_enter_guest()
9076 vcpu->arch.pending_ioapic_eoi; in vcpu_enter_guest()
9088 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in vcpu_enter_guest()
9089 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH; in vcpu_enter_guest()
9094 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in vcpu_enter_guest()
9095 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET; in vcpu_enter_guest()
9100 vcpu->run->exit_reason = KVM_EXIT_HYPERV; in vcpu_enter_guest()
9101 vcpu->run->hyperv = vcpu->arch.hyperv.exit; in vcpu_enter_guest()
9108 * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers in vcpu_enter_guest()
9109 * depend on the guest clock being up-to-date in vcpu_enter_guest()
9122 ++vcpu->stat.req_event; in vcpu_enter_guest()
9124 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { in vcpu_enter_guest()
9154 vcpu->mode = IN_GUEST_MODE; in vcpu_enter_guest()
9156 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in vcpu_enter_guest()
9159 * 1) We should set ->mode before checking ->requests. Please see in vcpu_enter_guest()
9162 * 2) For APICv, we should set ->mode before checking PID.ON. This in vcpu_enter_guest()
9176 if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active) in vcpu_enter_guest()
9180 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
9184 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in vcpu_enter_guest()
9200 if (unlikely(vcpu->arch.switch_db_regs)) { in vcpu_enter_guest()
9202 set_debugreg(vcpu->arch.eff_db[0], 0); in vcpu_enter_guest()
9203 set_debugreg(vcpu->arch.eff_db[1], 1); in vcpu_enter_guest()
9204 set_debugreg(vcpu->arch.eff_db[2], 2); in vcpu_enter_guest()
9205 set_debugreg(vcpu->arch.eff_db[3], 3); in vcpu_enter_guest()
9206 set_debugreg(vcpu->arch.dr6, 6); in vcpu_enter_guest()
9207 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; in vcpu_enter_guest()
9220 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { in vcpu_enter_guest()
9221 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); in vcpu_enter_guest()
9225 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; in vcpu_enter_guest()
9238 vcpu->arch.last_vmentry_cpu = vcpu->cpu; in vcpu_enter_guest()
9239 vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); in vcpu_enter_guest()
9241 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
9248 * VM-Exit on SVM and any ticks that occur between VM-Exit and now. in vcpu_enter_guest()
9255 ++vcpu->stat.exits; in vcpu_enter_guest()
9269 s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta; in vcpu_enter_guest()
9271 trace_kvm_wait_lapic_expire(vcpu->vcpu_id, delta); in vcpu_enter_guest()
9272 vcpu->arch.apic->lapic_timer.advance_expire_delta = S64_MIN; in vcpu_enter_guest()
9279 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in vcpu_enter_guest()
9289 if (unlikely(vcpu->arch.tsc_always_catchup)) in vcpu_enter_guest()
9292 if (vcpu->arch.apic_attention) in vcpu_enter_guest()
9302 if (unlikely(vcpu->arch.apic_attention)) in vcpu_enter_guest()
9312 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_block()
9314 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_block()
9324 switch(vcpu->arch.mp_state) { in vcpu_block()
9326 vcpu->arch.pv.pv_unhalted = false; in vcpu_block()
9327 vcpu->arch.mp_state = in vcpu_block()
9331 vcpu->arch.apf.halted = false; in vcpu_block()
9336 return -EINTR; in vcpu_block()
9344 kvm_x86_ops.nested_ops->check_events(vcpu); in kvm_vcpu_running()
9346 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && in kvm_vcpu_running()
9347 !vcpu->arch.apf.halted); in kvm_vcpu_running()
9353 struct kvm *kvm = vcpu->kvm; in vcpu_run()
9355 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_run()
9356 vcpu->arch.l1tf_flush_l1d = true; in vcpu_run()
9375 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; in vcpu_run()
9376 ++vcpu->stat.request_irq_exits; in vcpu_run()
9381 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_run()
9385 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_run()
9389 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_run()
9398 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in complete_emulated_io()
9400 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in complete_emulated_io()
9406 BUG_ON(!vcpu->arch.pio.count); in complete_emulated_pio()
9431 struct kvm_run *run = vcpu->run; in complete_emulated_mmio()
9435 BUG_ON(!vcpu->mmio_needed); in complete_emulated_mmio()
9438 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; in complete_emulated_mmio()
9439 len = min(8u, frag->len); in complete_emulated_mmio()
9440 if (!vcpu->mmio_is_write) in complete_emulated_mmio()
9441 memcpy(frag->data, run->mmio.data, len); in complete_emulated_mmio()
9443 if (frag->len <= 8) { in complete_emulated_mmio()
9446 vcpu->mmio_cur_fragment++; in complete_emulated_mmio()
9449 frag->data += len; in complete_emulated_mmio()
9450 frag->gpa += len; in complete_emulated_mmio()
9451 frag->len -= len; in complete_emulated_mmio()
9454 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { in complete_emulated_mmio()
9455 vcpu->mmio_needed = 0; in complete_emulated_mmio()
9457 /* FIXME: return into emulator if single-stepping. */ in complete_emulated_mmio()
9458 if (vcpu->mmio_is_write) in complete_emulated_mmio()
9460 vcpu->mmio_read_completed = 1; in complete_emulated_mmio()
9464 run->exit_reason = KVM_EXIT_MMIO; in complete_emulated_mmio()
9465 run->mmio.phys_addr = frag->gpa; in complete_emulated_mmio()
9466 if (vcpu->mmio_is_write) in complete_emulated_mmio()
9467 memcpy(run->mmio.data, frag->data, min(8u, frag->len)); in complete_emulated_mmio()
9468 run->mmio.len = min(8u, frag->len); in complete_emulated_mmio()
9469 run->mmio.is_write = vcpu->mmio_is_write; in complete_emulated_mmio()
9470 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in complete_emulated_mmio()
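/*
 * Hedged userspace sketch matching complete_emulated_mmio() above: on
 * KVM_EXIT_MMIO the VMM services at most 8 bytes per exit and then
 * re-enters with KVM_RUN; KVM's fragment bookkeeping drives any
 * remaining bytes.  device_mmio_{read,write}() are assumed VMM helpers.
 */
#include <stdint.h>
#include <linux/kvm.h>

extern void device_mmio_read(uint64_t addr, void *data, uint32_t len);
extern void device_mmio_write(uint64_t addr, const void *data, uint32_t len);

static void handle_kvm_exit_mmio(struct kvm_run *run)
{
	if (run->mmio.is_write)
		device_mmio_write(run->mmio.phys_addr, run->mmio.data,
				  run->mmio.len);
	else
		device_mmio_read(run->mmio.phys_addr, run->mmio.data,
				 run->mmio.len);
	/* caller then loops back into ioctl(vcpu_fd, KVM_RUN, 0) */
}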
9481 memcpy(&fpu->state, &current->thread.fpu.state, in kvm_save_current_fpu()
9492 kvm_save_current_fpu(vcpu->arch.user_fpu); in kvm_load_guest_fpu()
9495 __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state, in kvm_load_guest_fpu()
9509 kvm_save_current_fpu(vcpu->arch.guest_fpu); in kvm_put_guest_fpu()
9511 copy_kernel_to_fpregs(&vcpu->arch.user_fpu->state); in kvm_put_guest_fpu()
9516 ++vcpu->stat.fpu_reload; in kvm_put_guest_fpu()
9522 struct kvm_run *kvm_run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
9529 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { in kvm_arch_vcpu_ioctl_run()
9530 if (kvm_run->immediate_exit) { in kvm_arch_vcpu_ioctl_run()
9531 r = -EINTR; in kvm_arch_vcpu_ioctl_run()
9537 r = -EAGAIN; in kvm_arch_vcpu_ioctl_run()
9539 r = -EINTR; in kvm_arch_vcpu_ioctl_run()
9540 kvm_run->exit_reason = KVM_EXIT_INTR; in kvm_arch_vcpu_ioctl_run()
9541 ++vcpu->stat.signal_exits; in kvm_arch_vcpu_ioctl_run()
9546 if (kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) { in kvm_arch_vcpu_ioctl_run()
9547 r = -EINVAL; in kvm_arch_vcpu_ioctl_run()
9551 if (kvm_run->kvm_dirty_regs) { in kvm_arch_vcpu_ioctl_run()
9557 /* re-sync apic's tpr */ in kvm_arch_vcpu_ioctl_run()
9559 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { in kvm_arch_vcpu_ioctl_run()
9560 r = -EINVAL; in kvm_arch_vcpu_ioctl_run()
9565 if (unlikely(vcpu->arch.complete_userspace_io)) { in kvm_arch_vcpu_ioctl_run()
9566 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; in kvm_arch_vcpu_ioctl_run()
9567 vcpu->arch.complete_userspace_io = NULL; in kvm_arch_vcpu_ioctl_run()
9572 WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); in kvm_arch_vcpu_ioctl_run()
9574 if (kvm_run->immediate_exit) in kvm_arch_vcpu_ioctl_run()
9575 r = -EINTR; in kvm_arch_vcpu_ioctl_run()
9581 if (kvm_run->kvm_valid_regs) in kvm_arch_vcpu_ioctl_run()
9592 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { in __get_regs()
9600 emulator_writeback_register_cache(vcpu->arch.emulate_ctxt); in __get_regs()
9601 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in __get_regs()
9603 regs->rax = kvm_rax_read(vcpu); in __get_regs()
9604 regs->rbx = kvm_rbx_read(vcpu); in __get_regs()
9605 regs->rcx = kvm_rcx_read(vcpu); in __get_regs()
9606 regs->rdx = kvm_rdx_read(vcpu); in __get_regs()
9607 regs->rsi = kvm_rsi_read(vcpu); in __get_regs()
9608 regs->rdi = kvm_rdi_read(vcpu); in __get_regs()
9609 regs->rsp = kvm_rsp_read(vcpu); in __get_regs()
9610 regs->rbp = kvm_rbp_read(vcpu); in __get_regs()
9612 regs->r8 = kvm_r8_read(vcpu); in __get_regs()
9613 regs->r9 = kvm_r9_read(vcpu); in __get_regs()
9614 regs->r10 = kvm_r10_read(vcpu); in __get_regs()
9615 regs->r11 = kvm_r11_read(vcpu); in __get_regs()
9616 regs->r12 = kvm_r12_read(vcpu); in __get_regs()
9617 regs->r13 = kvm_r13_read(vcpu); in __get_regs()
9618 regs->r14 = kvm_r14_read(vcpu); in __get_regs()
9619 regs->r15 = kvm_r15_read(vcpu); in __get_regs()
9622 regs->rip = kvm_rip_read(vcpu); in __get_regs()
9623 regs->rflags = kvm_get_rflags(vcpu); in __get_regs()
9636 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; in __set_regs()
9637 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in __set_regs()
9639 kvm_rax_write(vcpu, regs->rax); in __set_regs()
9640 kvm_rbx_write(vcpu, regs->rbx); in __set_regs()
9641 kvm_rcx_write(vcpu, regs->rcx); in __set_regs()
9642 kvm_rdx_write(vcpu, regs->rdx); in __set_regs()
9643 kvm_rsi_write(vcpu, regs->rsi); in __set_regs()
9644 kvm_rdi_write(vcpu, regs->rdi); in __set_regs()
9645 kvm_rsp_write(vcpu, regs->rsp); in __set_regs()
9646 kvm_rbp_write(vcpu, regs->rbp); in __set_regs()
9648 kvm_r8_write(vcpu, regs->r8); in __set_regs()
9649 kvm_r9_write(vcpu, regs->r9); in __set_regs()
9650 kvm_r10_write(vcpu, regs->r10); in __set_regs()
9651 kvm_r11_write(vcpu, regs->r11); in __set_regs()
9652 kvm_r12_write(vcpu, regs->r12); in __set_regs()
9653 kvm_r13_write(vcpu, regs->r13); in __set_regs()
9654 kvm_r14_write(vcpu, regs->r14); in __set_regs()
9655 kvm_r15_write(vcpu, regs->r15); in __set_regs()
9658 kvm_rip_write(vcpu, regs->rip); in __set_regs()
9659 kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED); in __set_regs()
9661 vcpu->arch.exception.pending = false; in __set_regs()
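__get_regs()/__set_regs() back the KVM_GET_REGS/KVM_SET_REGS ioctls one-for-one. A small sketch of round-tripping the GPRs from userspace (vcpu_fd assumed; note that __set_regs() ORs X86_EFLAGS_FIXED into rflags regardless of what the caller passes):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int advance_rip(int vcpu_fd, unsigned long delta)
{
	struct kvm_regs regs;

	if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)
		return -1;
	regs.rip += delta;
	return ioctl(vcpu_fd, KVM_SET_REGS, &regs);
}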
9688 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in __get_sregs()
9689 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in __get_sregs()
9690 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); in __get_sregs()
9691 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in __get_sregs()
9692 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in __get_sregs()
9693 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in __get_sregs()
9695 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in __get_sregs()
9696 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in __get_sregs()
9699 sregs->idt.limit = dt.size; in __get_sregs()
9700 sregs->idt.base = dt.address; in __get_sregs()
9702 sregs->gdt.limit = dt.size; in __get_sregs()
9703 sregs->gdt.base = dt.address; in __get_sregs()
9705 sregs->cr0 = kvm_read_cr0(vcpu); in __get_sregs()
9706 sregs->cr2 = vcpu->arch.cr2; in __get_sregs()
9707 sregs->cr3 = kvm_read_cr3(vcpu); in __get_sregs()
9708 sregs->cr4 = kvm_read_cr4(vcpu); in __get_sregs()
9709 sregs->cr8 = kvm_get_cr8(vcpu); in __get_sregs()
9710 sregs->efer = vcpu->arch.efer; in __get_sregs()
9711 sregs->apic_base = kvm_get_apic_base(vcpu); in __get_sregs()
9713 memset(sregs->interrupt_bitmap, 0, sizeof(sregs->interrupt_bitmap)); in __get_sregs()
9715 if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft) in __get_sregs()
9716 set_bit(vcpu->arch.interrupt.nr, in __get_sregs()
9717 (unsigned long *)sregs->interrupt_bitmap); in __get_sregs()
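__get_sregs() is the KVM_GET_SREGS backend, exporting segments, descriptor tables and control registers in one struct. A sketch that uses it to check whether the guest has paging enabled (vcpu_fd assumed; the CR0.PG mask is spelled out here rather than taken from a header):

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdbool.h>

#define CR0_PG (1ULL << 31)	/* CR0.PG is bit 31 */

static bool guest_paging_enabled(int vcpu_fd)
{
	struct kvm_sregs sregs;

	if (ioctl(vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
		return false;
	return sregs.cr0 & CR0_PG;
}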
9737 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED && in kvm_arch_vcpu_ioctl_get_mpstate()
9738 vcpu->arch.pv.pv_unhalted) in kvm_arch_vcpu_ioctl_get_mpstate()
9739 mp_state->mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_ioctl_get_mpstate()
9741 mp_state->mp_state = vcpu->arch.mp_state; in kvm_arch_vcpu_ioctl_get_mpstate()
9752 int ret = -EINVAL; in kvm_arch_vcpu_ioctl_set_mpstate()
9757 mp_state->mp_state != KVM_MP_STATE_RUNNABLE) in kvm_arch_vcpu_ioctl_set_mpstate()
9765 if ((kvm_vcpu_latch_init(vcpu) || vcpu->arch.smi_pending) && in kvm_arch_vcpu_ioctl_set_mpstate()
9766 (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED || in kvm_arch_vcpu_ioctl_set_mpstate()
9767 mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED)) in kvm_arch_vcpu_ioctl_set_mpstate()
9770 if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { in kvm_arch_vcpu_ioctl_set_mpstate()
9771 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; in kvm_arch_vcpu_ioctl_set_mpstate()
9772 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); in kvm_arch_vcpu_ioctl_set_mpstate()
9774 vcpu->arch.mp_state = mp_state->mp_state; in kvm_arch_vcpu_ioctl_set_mpstate()
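These two handlers service KVM_GET_MP_STATE/KVM_SET_MP_STATE; note above that a HALTED vCPU with pv_unhalted set reads back as RUNNABLE. A minimal sketch of the set side (vcpu_fd assumed):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int force_runnable(int vcpu_fd)
{
	struct kvm_mp_state st = { .mp_state = KVM_MP_STATE_RUNNABLE };

	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &st);
}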
9786 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in kvm_task_switch()
9794 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_task_switch()
9795 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in kvm_task_switch()
9796 vcpu->run->internal.ndata = 0; in kvm_task_switch()
9800 kvm_rip_write(vcpu, ctxt->eip); in kvm_task_switch()
9801 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_task_switch()
9808 if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) { in kvm_valid_sregs()
9811 * 64-bit mode (though maybe in a 32-bit code segment). in kvm_valid_sregs()
9814 if (!(sregs->cr4 & X86_CR4_PAE) in kvm_valid_sregs()
9815 || !(sregs->efer & EFER_LMA)) in kvm_valid_sregs()
9816 return -EINVAL; in kvm_valid_sregs()
9817 if (sregs->cr3 & vcpu->arch.cr3_lm_rsvd_bits) in kvm_valid_sregs()
9818 return -EINVAL; in kvm_valid_sregs()
9821 * Not in 64-bit mode: EFER.LMA is clear and the code in kvm_valid_sregs()
9822 * segment cannot be 64-bit. in kvm_valid_sregs()
9824 if (sregs->efer & EFER_LMA || sregs->cs.l) in kvm_valid_sregs()
9825 return -EINVAL; in kvm_valid_sregs()
9828 return kvm_valid_cr4(vcpu, sregs->cr4); in kvm_valid_sregs()
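kvm_valid_sregs() enforces the architectural consistency rules: long mode (EFER.LME together with CR0.PG) requires CR4.PAE and EFER.LMA, while outside long mode neither EFER.LMA nor a 64-bit code segment is allowed. The same predicate restated as a standalone userspace pre-check, a sketch with the bit masks written out as assumptions:

#include <linux/kvm.h>
#include <stdbool.h>

#define EFER_LME	(1ULL << 8)
#define EFER_LMA	(1ULL << 10)
#define CR0_PG		(1ULL << 31)
#define CR4_PAE		(1ULL << 5)

static bool sregs_consistent(const struct kvm_sregs *s)
{
	if ((s->efer & EFER_LME) && (s->cr0 & CR0_PG))
		return (s->cr4 & CR4_PAE) && (s->efer & EFER_LMA);
	return !(s->efer & EFER_LMA) && !s->cs.l;
}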
9838 int ret = -EINVAL; in __set_sregs()
9843 apic_base_msr.data = sregs->apic_base; in __set_sregs()
9848 dt.size = sregs->idt.limit; in __set_sregs()
9849 dt.address = sregs->idt.base; in __set_sregs()
9851 dt.size = sregs->gdt.limit; in __set_sregs()
9852 dt.address = sregs->gdt.base; in __set_sregs()
9855 vcpu->arch.cr2 = sregs->cr2; in __set_sregs()
9856 mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; in __set_sregs()
9857 vcpu->arch.cr3 = sregs->cr3; in __set_sregs()
9860 kvm_set_cr8(vcpu, sregs->cr8); in __set_sregs()
9862 mmu_reset_needed |= vcpu->arch.efer != sregs->efer; in __set_sregs()
9863 kvm_x86_ops.set_efer(vcpu, sregs->efer); in __set_sregs()
9865 mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; in __set_sregs()
9866 kvm_x86_ops.set_cr0(vcpu, sregs->cr0); in __set_sregs()
9867 vcpu->arch.cr0 = sregs->cr0; in __set_sregs()
9869 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; in __set_sregs()
9870 cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) & in __set_sregs()
9872 kvm_x86_ops.set_cr4(vcpu, sregs->cr4); in __set_sregs()
9876 idx = srcu_read_lock(&vcpu->kvm->srcu); in __set_sregs()
9878 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); in __set_sregs()
9881 srcu_read_unlock(&vcpu->kvm->srcu, idx); in __set_sregs()
9888 (const unsigned long *)sregs->interrupt_bitmap, max_bits); in __set_sregs()
9894 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in __set_sregs()
9895 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in __set_sregs()
9896 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); in __set_sregs()
9897 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in __set_sregs()
9898 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in __set_sregs()
9899 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in __set_sregs()
9901 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in __set_sregs()
9902 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in __set_sregs()
9908 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && in __set_sregs()
9910 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in __set_sregs()
9938 if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) { in kvm_arch_vcpu_ioctl_set_guest_debug()
9939 r = -EBUSY; in kvm_arch_vcpu_ioctl_set_guest_debug()
9940 if (vcpu->arch.exception.pending) in kvm_arch_vcpu_ioctl_set_guest_debug()
9942 if (dbg->control & KVM_GUESTDBG_INJECT_DB) in kvm_arch_vcpu_ioctl_set_guest_debug()
9954 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
9955 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) in kvm_arch_vcpu_ioctl_set_guest_debug()
9956 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
9958 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { in kvm_arch_vcpu_ioctl_set_guest_debug()
9960 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
9961 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; in kvm_arch_vcpu_ioctl_set_guest_debug()
9964 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
9968 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_arch_vcpu_ioctl_set_guest_debug()
9969 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) + in kvm_arch_vcpu_ioctl_set_guest_debug()
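kvm_arch_vcpu_ioctl_set_guest_debug() is reached through KVM_SET_GUEST_DEBUG; as shown above, single-stepping records singlestep_rip so a later RFLAGS write can tell whether the step completed. Arming it from userspace might look like this sketch (vcpu_fd assumed):

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <string.h>

static int enable_singlestep(int vcpu_fd)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}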
9993 unsigned long vaddr = tr->linear_address; in kvm_arch_vcpu_ioctl_translate()
9999 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl_translate()
10001 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl_translate()
10002 tr->physical_address = gpa; in kvm_arch_vcpu_ioctl_translate()
10003 tr->valid = gpa != UNMAPPED_GVA; in kvm_arch_vcpu_ioctl_translate()
10004 tr->writeable = 1; in kvm_arch_vcpu_ioctl_translate()
10005 tr->usermode = 0; in kvm_arch_vcpu_ioctl_translate()
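kvm_arch_vcpu_ioctl_translate() walks the guest page tables for KVM_TRANSLATE and reports UNMAPPED_GVA as !valid. Driving it from userspace, as a sketch (vcpu_fd assumed; ~0 is used here as an arbitrary "unmapped" sentinel):

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

static uint64_t gva_to_gpa(int vcpu_fd, uint64_t gva)
{
	struct kvm_translation tr = { .linear_address = gva };

	if (ioctl(vcpu_fd, KVM_TRANSLATE, &tr) < 0 || !tr.valid)
		return ~0ULL;
	return tr.physical_address;
}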
10017 fxsave = &vcpu->arch.guest_fpu->state.fxsave; in kvm_arch_vcpu_ioctl_get_fpu()
10018 memcpy(fpu->fpr, fxsave->st_space, 128); in kvm_arch_vcpu_ioctl_get_fpu()
10019 fpu->fcw = fxsave->cwd; in kvm_arch_vcpu_ioctl_get_fpu()
10020 fpu->fsw = fxsave->swd; in kvm_arch_vcpu_ioctl_get_fpu()
10021 fpu->ftwx = fxsave->twd; in kvm_arch_vcpu_ioctl_get_fpu()
10022 fpu->last_opcode = fxsave->fop; in kvm_arch_vcpu_ioctl_get_fpu()
10023 fpu->last_ip = fxsave->rip; in kvm_arch_vcpu_ioctl_get_fpu()
10024 fpu->last_dp = fxsave->rdp; in kvm_arch_vcpu_ioctl_get_fpu()
10025 memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space)); in kvm_arch_vcpu_ioctl_get_fpu()
10037 fxsave = &vcpu->arch.guest_fpu->state.fxsave; in kvm_arch_vcpu_ioctl_set_fpu()
10039 memcpy(fxsave->st_space, fpu->fpr, 128); in kvm_arch_vcpu_ioctl_set_fpu()
10040 fxsave->cwd = fpu->fcw; in kvm_arch_vcpu_ioctl_set_fpu()
10041 fxsave->swd = fpu->fsw; in kvm_arch_vcpu_ioctl_set_fpu()
10042 fxsave->twd = fpu->ftwx; in kvm_arch_vcpu_ioctl_set_fpu()
10043 fxsave->fop = fpu->last_opcode; in kvm_arch_vcpu_ioctl_set_fpu()
10044 fxsave->rip = fpu->last_ip; in kvm_arch_vcpu_ioctl_set_fpu()
10045 fxsave->rdp = fpu->last_dp; in kvm_arch_vcpu_ioctl_set_fpu()
10046 memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space)); in kvm_arch_vcpu_ioctl_set_fpu()
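The get/set pair above marshals the guest's FXSAVE image through struct kvm_fpu. A small sketch of reading it back (vcpu_fd assumed):

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

static void dump_fpu_control_words(int vcpu_fd)
{
	struct kvm_fpu fpu;

	if (ioctl(vcpu_fd, KVM_GET_FPU, &fpu) == 0)
		printf("fcw=%#x fsw=%#x ftwx=%#x\n",
		       fpu.fcw, fpu.fsw, fpu.ftwx);
}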
10056 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS) in store_regs()
10057 __get_regs(vcpu, &vcpu->run->s.regs.regs); in store_regs()
10059 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS) in store_regs()
10060 __get_sregs(vcpu, &vcpu->run->s.regs.sregs); in store_regs()
10062 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS) in store_regs()
10064 vcpu, &vcpu->run->s.regs.events); in store_regs()
10069 if (vcpu->run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS) in sync_regs()
10070 return -EINVAL; in sync_regs()
10072 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) { in sync_regs()
10073 __set_regs(vcpu, &vcpu->run->s.regs.regs); in sync_regs()
10074 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS; in sync_regs()
10076 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) { in sync_regs()
10077 if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs)) in sync_regs()
10078 return -EINVAL; in sync_regs()
10079 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS; in sync_regs()
10081 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) { in sync_regs()
10083 vcpu, &vcpu->run->s.regs.events)) in sync_regs()
10084 return -EINVAL; in sync_regs()
10085 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS; in sync_regs()
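store_regs()/sync_regs() implement the kvm_run synced-registers protocol: userspace sets bits in kvm_dirty_regs before KVM_RUN to push state in, and everything selected by kvm_valid_regs is written back on exit, saving separate GET/SET ioctls. A sketch against the mmap'ed kvm_run (the fields are real UAPI; the surrounding run loop is assumed):

#include <linux/kvm.h>

static void patch_rax_before_run(struct kvm_run *run, __u64 val)
{
	run->s.regs.regs.rax = val;
	run->kvm_dirty_regs |= KVM_SYNC_X86_REGS;
	/* after the next KVM_RUN, run->s.regs.regs holds fresh guest
	 * GPRs for every bit set in run->kvm_valid_regs */
}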
10093 fpstate_init(&vcpu->arch.guest_fpu->state); in fx_init()
10095 vcpu->arch.guest_fpu->state.xsave.header.xcomp_bv = in fx_init()
10101 vcpu->arch.xcr0 = XFEATURE_MASK_FP; in fx_init()
10103 vcpu->arch.cr0 |= X86_CR0_ET; in fx_init()
10108 if (kvm_check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0) in kvm_arch_vcpu_precreate()
10120 if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu)) in kvm_arch_vcpu_create()
10121 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_create()
10123 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; in kvm_arch_vcpu_create()
10131 if (irqchip_in_kernel(vcpu->kvm)) { in kvm_arch_vcpu_create()
10135 if (kvm_apicv_activated(vcpu->kvm)) in kvm_arch_vcpu_create()
10136 vcpu->arch.apicv_active = true; in kvm_arch_vcpu_create()
10140 r = -ENOMEM; in kvm_arch_vcpu_create()
10145 vcpu->arch.pio_data = page_address(page); in kvm_arch_vcpu_create()
10147 vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, in kvm_arch_vcpu_create()
10149 if (!vcpu->arch.mce_banks) in kvm_arch_vcpu_create()
10151 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; in kvm_arch_vcpu_create()
10153 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, in kvm_arch_vcpu_create()
10160 vcpu->arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache, in kvm_arch_vcpu_create()
10162 if (!vcpu->arch.user_fpu) { in kvm_arch_vcpu_create()
10167 vcpu->arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, in kvm_arch_vcpu_create()
10169 if (!vcpu->arch.guest_fpu) { in kvm_arch_vcpu_create()
10175 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); in kvm_arch_vcpu_create()
10176 vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63); in kvm_arch_vcpu_create()
10178 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; in kvm_arch_vcpu_create()
10183 vcpu->arch.pending_external_vector = -1; in kvm_arch_vcpu_create()
10184 vcpu->arch.preempted_in_kernel = false; in kvm_arch_vcpu_create()
10192 vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); in kvm_arch_vcpu_create()
10193 vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; in kvm_arch_vcpu_create()
10202 kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu); in kvm_arch_vcpu_create()
10204 kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu); in kvm_arch_vcpu_create()
10206 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_create()
10208 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_create()
10210 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_create()
10212 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_create()
10222 struct kvm *kvm = vcpu->kvm; in kvm_arch_vcpu_postcreate()
10226 if (mutex_lock_killable(&vcpu->mutex)) in kvm_arch_vcpu_postcreate()
10233 vcpu->arch.msr_kvm_poll_control = 1; in kvm_arch_vcpu_postcreate()
10235 mutex_unlock(&vcpu->mutex); in kvm_arch_vcpu_postcreate()
10237 if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0) in kvm_arch_vcpu_postcreate()
10238 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, in kvm_arch_vcpu_postcreate()
10244 struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache; in kvm_arch_vcpu_destroy()
10247 kvm_release_pfn(cache->pfn, cache->dirty, cache); in kvm_arch_vcpu_destroy()
10253 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_destroy()
10254 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_destroy()
10255 kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu); in kvm_arch_vcpu_destroy()
10256 kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu); in kvm_arch_vcpu_destroy()
10260 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_destroy()
10262 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_destroy()
10264 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_destroy()
10265 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_destroy()
10266 kvfree(vcpu->arch.cpuid_entries); in kvm_arch_vcpu_destroy()
10275 vcpu->arch.hflags = 0; in kvm_vcpu_reset()
10277 vcpu->arch.smi_pending = 0; in kvm_vcpu_reset()
10278 vcpu->arch.smi_count = 0; in kvm_vcpu_reset()
10279 atomic_set(&vcpu->arch.nmi_queued, 0); in kvm_vcpu_reset()
10280 vcpu->arch.nmi_pending = 0; in kvm_vcpu_reset()
10281 vcpu->arch.nmi_injected = false; in kvm_vcpu_reset()
10285 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); in kvm_vcpu_reset()
10287 vcpu->arch.dr6 = DR6_INIT; in kvm_vcpu_reset()
10288 vcpu->arch.dr7 = DR7_FIXED_1; in kvm_vcpu_reset()
10291 vcpu->arch.cr2 = 0; in kvm_vcpu_reset()
10294 vcpu->arch.apf.msr_en_val = 0; in kvm_vcpu_reset()
10295 vcpu->arch.apf.msr_int_val = 0; in kvm_vcpu_reset()
10296 vcpu->arch.st.msr_val = 0; in kvm_vcpu_reset()
10302 vcpu->arch.apf.halted = false; in kvm_vcpu_reset()
10313 mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave, in kvm_vcpu_reset()
10317 mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave, in kvm_vcpu_reset()
10327 vcpu->arch.smbase = 0x30000; in kvm_vcpu_reset()
10329 vcpu->arch.msr_misc_features_enables = 0; in kvm_vcpu_reset()
10331 vcpu->arch.xcr0 = XFEATURE_MASK_FP; in kvm_vcpu_reset()
10334 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); in kvm_vcpu_reset()
10335 vcpu->arch.regs_avail = ~0; in kvm_vcpu_reset()
10336 vcpu->arch.regs_dirty = ~0; in kvm_vcpu_reset()
10338 vcpu->arch.ia32_xss = 0; in kvm_vcpu_reset()
10373 if (!stable && vcpu->cpu == smp_processor_id()) in kvm_arch_hardware_enable()
10375 if (stable && vcpu->arch.last_host_tsc > local_tsc) { in kvm_arch_hardware_enable()
10377 if (vcpu->arch.last_host_tsc > max_tsc) in kvm_arch_hardware_enable()
10378 max_tsc = vcpu->arch.last_host_tsc; in kvm_arch_hardware_enable()
10408 * N.B. - this code below runs only on platforms with reliable TSC, in kvm_arch_hardware_enable()
10422 u64 delta_cyc = max_tsc - local_tsc; in kvm_arch_hardware_enable()
10424 kvm->arch.backwards_tsc_observed = true; in kvm_arch_hardware_enable()
10426 vcpu->arch.tsc_offset_adjustment += delta_cyc; in kvm_arch_hardware_enable()
10427 vcpu->arch.last_host_tsc = local_tsc; in kvm_arch_hardware_enable()
10437 kvm->arch.last_tsc_nsec = 0; in kvm_arch_hardware_enable()
10438 kvm->arch.last_tsc_write = 0; in kvm_arch_hardware_enable()
10461 r = ops->hardware_setup(); in kvm_arch_hardware_setup()
10465 memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops)); in kvm_arch_hardware_setup()
10506 return -EIO; in kvm_arch_check_processor_compat()
10508 return ops->check_processor_compatibility(); in kvm_arch_check_processor_compat()
10513 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; in kvm_vcpu_is_reset_bsp()
10519 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; in kvm_vcpu_is_bsp()
10529 vcpu->arch.l1tf_flush_l1d = true; in kvm_arch_sched_in()
10530 if (pmu->version && unlikely(pmu->event_count)) { in kvm_arch_sched_in()
10531 pmu->need_cleanup = true; in kvm_arch_sched_in()
10539 kfree(kvm->arch.hyperv.hv_pa_pg); in kvm_arch_free_vm()
10549 return -EINVAL; in kvm_arch_init_vm()
10555 INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); in kvm_arch_init_vm()
10556 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); in kvm_arch_init_vm()
10557 INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); in kvm_arch_init_vm()
10558 INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages); in kvm_arch_init_vm()
10559 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); in kvm_arch_init_vm()
10560 atomic_set(&kvm->arch.noncoherent_dma_count, 0); in kvm_arch_init_vm()
10563 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); in kvm_arch_init_vm()
10564 /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */ in kvm_arch_init_vm()
10566 &kvm->arch.irq_sources_bitmap); in kvm_arch_init_vm()
10568 raw_spin_lock_init(&kvm->arch.tsc_write_lock); in kvm_arch_init_vm()
10569 mutex_init(&kvm->arch.apic_map_lock); in kvm_arch_init_vm()
10570 spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock); in kvm_arch_init_vm()
10572 kvm->arch.kvmclock_offset = -get_kvmclock_base_ns(); in kvm_arch_init_vm()
10575 kvm->arch.guest_can_read_msr_platform_info = true; in kvm_arch_init_vm()
10577 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); in kvm_arch_init_vm()
10578 INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); in kvm_arch_init_vm()
10613 mutex_lock(&kvm->lock); in kvm_free_vcpus()
10614 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) in kvm_free_vcpus()
10615 kvm->vcpus[i] = NULL; in kvm_free_vcpus()
10617 atomic_set(&kvm->online_vcpus, 0); in kvm_free_vcpus()
10618 mutex_unlock(&kvm->lock); in kvm_free_vcpus()
10623 cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work); in kvm_arch_sync_events()
10624 cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work); in kvm_arch_sync_events()
10635 /* Called with kvm->slots_lock held. */ in __x86_set_memory_region()
10637 return -EINVAL; in __x86_set_memory_region()
10641 if (slot && slot->npages) in __x86_set_memory_region()
10642 return -EEXIST; in __x86_set_memory_region()
10653 if (!slot || !slot->npages) in __x86_set_memory_region()
10656 old_npages = slot->npages; in __x86_set_memory_region()
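__x86_set_memory_region() is the kernel-internal twin of the userspace memslot API, including the "size 0 deletes the slot" convention visible above. The userspace analog, sketched (vm_fd and a page-aligned backing buffer are assumed):

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

static int set_memslot(int vm_fd, uint32_t slot, uint64_t gpa,
		       uint64_t size, void *host_va)
{
	struct kvm_userspace_memory_region mr = {
		.slot = slot,
		.guest_phys_addr = gpa,
		.memory_size = size,		/* 0 deletes the slot */
		.userspace_addr = (uintptr_t)host_va,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mr);
}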
10687 if (current->mm == kvm->mm) { in kvm_arch_destroy_vm()
10693 mutex_lock(&kvm->slots_lock); in kvm_arch_destroy_vm()
10699 mutex_unlock(&kvm->slots_lock); in kvm_arch_destroy_vm()
10703 kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1)); in kvm_arch_destroy_vm()
10707 kvfree(rcu_dereference_check(kvm->arch.apic_map, 1)); in kvm_arch_destroy_vm()
10708 kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1)); in kvm_arch_destroy_vm()
10719 kvfree(slot->arch.rmap[i]); in kvm_arch_free_memslot()
10720 slot->arch.rmap[i] = NULL; in kvm_arch_free_memslot()
10725 kvfree(slot->arch.lpage_info[i - 1]); in kvm_arch_free_memslot()
10726 slot->arch.lpage_info[i - 1] = NULL; in kvm_arch_free_memslot()
10742 memset(&slot->arch, 0, sizeof(slot->arch)); in kvm_alloc_memslot_metadata()
10750 lpages = gfn_to_index(slot->base_gfn + npages - 1, in kvm_alloc_memslot_metadata()
10751 slot->base_gfn, level) + 1; in kvm_alloc_memslot_metadata()
10753 slot->arch.rmap[i] = in kvm_alloc_memslot_metadata()
10754 kvcalloc(lpages, sizeof(*slot->arch.rmap[i]), in kvm_alloc_memslot_metadata()
10756 if (!slot->arch.rmap[i]) in kvm_alloc_memslot_metadata()
10765 slot->arch.lpage_info[i - 1] = linfo; in kvm_alloc_memslot_metadata()
10767 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) in kvm_alloc_memslot_metadata()
10769 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) in kvm_alloc_memslot_metadata()
10770 linfo[lpages - 1].disallow_lpage = 1; in kvm_alloc_memslot_metadata()
10771 ugfn = slot->userspace_addr >> PAGE_SHIFT; in kvm_alloc_memslot_metadata()
10776 if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) { in kvm_alloc_memslot_metadata()
10791 kvfree(slot->arch.rmap[i]); in kvm_alloc_memslot_metadata()
10792 slot->arch.rmap[i] = NULL; in kvm_alloc_memslot_metadata()
10796 kvfree(slot->arch.lpage_info[i - 1]); in kvm_alloc_memslot_metadata()
10797 slot->arch.lpage_info[i - 1] = NULL; in kvm_alloc_memslot_metadata()
10799 return -ENOMEM; in kvm_alloc_memslot_metadata()
10808 * memslots->generation has been incremented. in kvm_arch_memslots_updated()
10813 /* Force re-initialization of steal_time cache */ in kvm_arch_memslots_updated()
10825 mem->memory_size >> PAGE_SHIFT); in kvm_arch_prepare_memory_region()
10838 if ((change != KVM_MR_FLAGS_ONLY) || (new->flags & KVM_MEM_READONLY)) in kvm_mmu_slot_apply_flags()
10850 * which can be collapsed into a single large-page spte. Later in kvm_mmu_slot_apply_flags()
10851 * page faults will create the large-page sptes. in kvm_mmu_slot_apply_flags()
10858 if ((old->flags & KVM_MEM_LOG_DIRTY_PAGES) && in kvm_mmu_slot_apply_flags()
10859 !(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) in kvm_mmu_slot_apply_flags()
10875 * When disabling dirty logging with PML enabled, the D-bit is set in kvm_mmu_slot_apply_flags()
10882 * When enabling dirty logging, large sptes are write-protected in kvm_mmu_slot_apply_flags()
10887 * initial-all-set state. Otherwise, depending on whether pml in kvm_mmu_slot_apply_flags()
10888 * is enabled the D-bit or the W-bit will be cleared. in kvm_mmu_slot_apply_flags()
10890 if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) { in kvm_mmu_slot_apply_flags()
10899 * If we're with initial-all-set, we don't need in kvm_mmu_slot_apply_flags()
10902 * we still need to write-protect huge pages in kvm_mmu_slot_apply_flags()
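The comments above describe what toggling dirty logging does to the sptes: enabling it write-protects them (or clears the D-bit under PML) so guest writes get recorded, and disabling it lets large pages be rebuilt. The userspace consumer of all this is KVM_GET_DIRTY_LOG, sketched here (vm_fd, slot and a suitably sized caller-allocated bitmap assumed):

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

static int fetch_dirty_log(int vm_fd, uint32_t slot, void *bitmap)
{
	struct kvm_dirty_log log = {
		.slot = slot,
		.dirty_bitmap = bitmap,	/* one bit per page in the slot */
	};

	return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
}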
10920 if (!kvm->arch.n_requested_mmu_pages) in kvm_arch_commit_memory_region()
10925 * FIXME: const-ify all uses of struct kvm_memory_slot. in kvm_arch_commit_memory_region()
10954 if (!list_empty_careful(&vcpu->async_pf.done)) in kvm_vcpu_has_events()
10960 if (vcpu->arch.pv.pv_unhalted) in kvm_vcpu_has_events()
10963 if (vcpu->arch.exception.pending) in kvm_vcpu_has_events()
10967 (vcpu->arch.nmi_pending && in kvm_vcpu_has_events()
10972 (vcpu->arch.smi_pending && in kvm_vcpu_has_events()
10985 kvm_x86_ops.nested_ops->hv_timer_pending && in kvm_vcpu_has_events()
10986 kvm_x86_ops.nested_ops->hv_timer_pending(vcpu)) in kvm_vcpu_has_events()
10999 if (READ_ONCE(vcpu->arch.pv.pv_unhalted)) in kvm_arch_dy_runnable()
11007 if (vcpu->arch.apicv_active && kvm_x86_ops.dy_apicv_has_pending_interrupt(vcpu)) in kvm_arch_dy_runnable()
11015 return vcpu->arch.preempted_in_kernel; in kvm_arch_vcpu_in_kernel()
11048 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_get_rflags()
11056 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && in __kvm_set_rflags()
11057 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) in __kvm_set_rflags()
11073 if ((vcpu->arch.mmu->direct_map != work->arch.direct_map) || in kvm_arch_async_page_ready()
11074 work->wakeup_all) in kvm_arch_async_page_ready()
11081 if (!vcpu->arch.mmu->direct_map && in kvm_arch_async_page_ready()
11082 work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu)) in kvm_arch_async_page_ready()
11085 kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true); in kvm_arch_async_page_ready()
11097 return (key + 1) & (ASYNC_PF_PER_VCPU - 1); in kvm_async_pf_next_probe()
11104 while (vcpu->arch.apf.gfns[key] != ~0) in kvm_add_async_pf_gfn()
11107 vcpu->arch.apf.gfns[key] = gfn; in kvm_add_async_pf_gfn()
11116 (vcpu->arch.apf.gfns[key] != gfn && in kvm_async_pf_gfn_slot()
11117 vcpu->arch.apf.gfns[key] != ~0); i++) in kvm_async_pf_gfn_slot()
11125 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; in kvm_find_async_pf_gfn()
11134 if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn)) in kvm_del_async_pf_gfn()
11138 vcpu->arch.apf.gfns[i] = ~0; in kvm_del_async_pf_gfn()
11141 if (vcpu->arch.apf.gfns[j] == ~0) in kvm_del_async_pf_gfn()
11143 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); in kvm_del_async_pf_gfn()
11150 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; in kvm_del_async_pf_gfn()
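kvm_add_async_pf_gfn()/kvm_del_async_pf_gfn() keep the outstanding async-PF gfns in a fixed, power-of-two, open-addressed table with linear probing; deletion re-homes any entry whose probe chain ran through the vacated slot (the "k cyclically in (i, j]" test above). A self-contained sketch of that scheme, with the table size and hash function simplified as assumptions:

#include <stdint.h>

#define NSLOTS	64U		/* power of two, like ASYNC_PF_PER_VCPU */
#define EMPTY	(~0ULL)

static uint64_t slots[NSLOTS] = { [0 ... NSLOTS - 1] = EMPTY };

static uint32_t hash(uint64_t gfn)
{
	return (uint32_t)(gfn * 0x9E3779B97F4A7C15ULL) & (NSLOTS - 1);
}

static void add_gfn(uint64_t gfn)
{
	uint32_t key = hash(gfn);

	while (slots[key] != EMPTY)	/* linear probe */
		key = (key + 1) & (NSLOTS - 1);
	slots[key] = gfn;
}

static void del_gfn(uint64_t gfn)
{
	uint32_t i = hash(gfn), j, k;

	while (slots[i] != gfn && slots[i] != EMPTY)
		i = (i + 1) & (NSLOTS - 1);
	if (slots[i] == EMPTY)
		return;
	slots[i] = EMPTY;
	for (j = i;;) {
		j = (j + 1) & (NSLOTS - 1);
		if (slots[j] == EMPTY)
			return;
		k = hash(slots[j]);
		/* if k lies cyclically in (i, j], slots[j] is still
		 * reachable from its home slot; leave it alone */
		if ((i <= j) ? (i < k && k <= j) : (i < k || k <= j))
			continue;
		/* otherwise move it into the hole and repeat from there */
		slots[i] = slots[j];
		slots[j] = EMPTY;
		i = j;
	}
}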
11159 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason, in apf_put_user_notpresent()
11167 return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, in apf_put_user_ready()
11176 if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, in apf_pageready_slot_free()
11185 if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu)) in kvm_can_deliver_async_pf()
11189 (vcpu->arch.apf.send_user_only && kvm_x86_ops.get_cpl(vcpu) == 0)) in kvm_can_deliver_async_pf()
11199 vcpu->arch.exception.pending)) in kvm_can_do_async_pf()
11202 if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu)) in kvm_can_do_async_pf()
11217 trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa); in kvm_arch_async_page_not_present()
11218 kvm_add_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_not_present()
11226 fault.address = work->arch.token; in kvm_arch_async_page_not_present()
11249 .vector = vcpu->arch.apf.vec in kvm_arch_async_page_present()
11252 if (work->wakeup_all) in kvm_arch_async_page_present()
11253 work->arch.token = ~0; /* broadcast wakeup */ in kvm_arch_async_page_present()
11255 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_present()
11256 trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa); in kvm_arch_async_page_present()
11258 if ((work->wakeup_all || work->notpresent_injected) && in kvm_arch_async_page_present()
11260 !apf_put_user_ready(vcpu, work->arch.token)) { in kvm_arch_async_page_present()
11261 vcpu->arch.apf.pageready_pending = true; in kvm_arch_async_page_present()
11265 vcpu->arch.apf.halted = false; in kvm_arch_async_page_present()
11266 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_async_page_present()
11272 if (!vcpu->arch.apf.pageready_pending) in kvm_arch_async_page_present_queued()
11286 atomic_inc(&kvm->arch.assigned_device_count); in kvm_arch_start_assignment()
11292 atomic_dec(&kvm->arch.assigned_device_count); in kvm_arch_end_assignment()
11298 return arch_atomic_read(&kvm->arch.assigned_device_count); in kvm_arch_has_assigned_device()
11304 atomic_inc(&kvm->arch.noncoherent_dma_count); in kvm_arch_register_noncoherent_dma()
11310 atomic_dec(&kvm->arch.noncoherent_dma_count); in kvm_arch_unregister_noncoherent_dma()
11316 return atomic_read(&kvm->arch.noncoherent_dma_count); in kvm_arch_has_noncoherent_dma()
11332 irqfd->producer = prod; in kvm_arch_irq_bypass_add_producer()
11333 kvm_arch_start_assignment(irqfd->kvm); in kvm_arch_irq_bypass_add_producer()
11334 ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, in kvm_arch_irq_bypass_add_producer()
11335 prod->irq, irqfd->gsi, 1); in kvm_arch_irq_bypass_add_producer()
11338 kvm_arch_end_assignment(irqfd->kvm); in kvm_arch_irq_bypass_add_producer()
11350 WARN_ON(irqfd->producer != prod); in kvm_arch_irq_bypass_del_producer()
11351 irqfd->producer = NULL; in kvm_arch_irq_bypass_del_producer()
11355 * remapped mode, so we can re-use the current implementation in kvm_arch_irq_bypass_del_producer()
11359 ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0); in kvm_arch_irq_bypass_del_producer()
11362 " fails: %d\n", irqfd->consumer.token, ret); in kvm_arch_irq_bypass_del_producer()
11364 kvm_arch_end_assignment(irqfd->kvm); in kvm_arch_irq_bypass_del_producer()
11380 return (vcpu->arch.msr_kvm_poll_control & 1) == 0; in kvm_arch_no_poll()
11418 vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, &fault) != UNMAPPED_GVA) { in kvm_fixup_and_inject_pf_error()
11420 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page in kvm_fixup_and_inject_pf_error()
11430 vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault); in kvm_fixup_and_inject_pf_error()
11451 * doesn't seem to be a real use-case behind such requests, just return in kvm_handle_memory_failure()
11454 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_handle_memory_failure()
11455 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in kvm_handle_memory_failure()
11456 vcpu->run->internal.ndata = 0; in kvm_handle_memory_failure()
11507 if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].pgd) in kvm_handle_invpcid()
11511 kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free); in kvm_handle_invpcid()
11523 * page tables, so a non-global flush just degenerates to a in kvm_handle_invpcid()