/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"

#define KVM_DEFAULT_PLE_GAP		128
#define KVM_VMX_DEFAULT_PLE_WINDOW	4096
#define KVM_DEFAULT_PLE_WINDOW_GROW	2
#define KVM_DEFAULT_PLE_WINDOW_SHRINK	0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX	UINT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX	USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW	3000

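/*
 * Helpers for scaling the pause-loop-exiting (PLE) window.  Growing
 * multiplies the current value by the modifier when the modifier is
 * smaller than the base (exponential growth) and adds it otherwise;
 * shrinking divides or subtracts correspondingly.  The result is
 * clamped to the supplied max/min, and a modifier of zero resets the
 * window to the base value.
 */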
static inline unsigned int __grow_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int max)
{
	u64 ret = val;

	if (modifier < 1)
		return base;

	if (modifier < base)
		ret *= modifier;
	else
		ret += modifier;

	return min(ret, (u64)max);
}

static inline unsigned int __shrink_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int min)
{
	if (modifier < 1)
		return base;

	if (modifier < base)
		val /= modifier;
	else
		val -= modifier;

	return max(val, min);
}

#define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
	vcpu->arch.exception.injected = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.injected = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.injected = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
		vcpu->arch.nmi_injected;
}

static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	if (!is_long_mode(vcpu))
		return false;
	kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return (vcpu->arch.efer & EFER_LMA) &&
		 kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
#else
	return 0;
#endif
}

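/*
 * Exception vectors for which the CPU pushes an error code: #DF, #TS,
 * #NP, #SS, #GP, #PF and #AC.
 */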
static inline bool x86_exception_has_error_code(unsigned int vector)
{
	static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
			BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
			BIT(PF_VECTOR) | BIT(AC_VECTOR);

	return (1U << vector) & exception_has_error_code;
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops.tlb_flush_current(vcpu);
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
{
	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
}

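/*
 * Sign-extend bit (vaddr_bits - 1) of a linear address into the upper
 * bits to produce its canonical form; an address is non-canonical if
 * this changes it.
 */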
static inline u64 get_canonical(u64 la, u8 vaddr_bits)
{
	return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
}

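/*
 * Cache the GVA, GFN and access bits of the last MMIO access so that
 * repeated accesses to the same page can be recognized as MMIO without
 * another page walk.  The cache is tagged with the memslots generation
 * and is not populated while a memslot update is in progress.
 */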
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	u64 gen = kvm_memslots(vcpu->kvm)->generation;

	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return;

	/*
	 * If this is a shadow nested page table, the "GVA" is
	 * actually a nGPA.
	 */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.mmio_access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = gen;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	      vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	      vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

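/*
 * Read/write a general-purpose register, truncating the value to
 * 32 bits unless the vCPU is executing in 64-bit mode.
 */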
static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu, int reg)
{
	unsigned long val = kvm_register_read(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
				       int reg, unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write(vcpu, reg, val);
}

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

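/*
 * An INIT signal is latched rather than delivered while the vCPU is in
 * SMM or while the vendor module reports INIT as blocked.
 */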
static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
{
	return is_smm(vcpu) || kvm_x86_ops.apic_init_signal_blocked(vcpu);
}

void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
u64 get_kvmclock_ns(struct kvm *kvm);

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int handle_ud(struct kvm_vcpu *vcpu);

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);
bool kvm_vector_hashing_enabled(void);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
				    void *insn, int insn_len);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);

extern u64 host_xcr0;
extern u64 supported_xcr0;
extern u64 supported_xss;

static inline bool kvm_mpx_supported(void)
{
	return (supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		== (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
}

extern unsigned int min_timer_period_us;

extern bool enable_vmware_backdoor;

extern int pi_inject_timer;

extern struct static_key kvm_no_apic_vcpu;

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)					\
	({							\
	    u32 __quot, __rem;					\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
			: "rm" (base), "0" (0), "1" ((u32) n));	\
	    n = __quot;						\
	    __rem;						\
	 })

static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
	return kvm->arch.mwait_in_guest;
}

static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
	return kvm->arch.hlt_in_guest;
}

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
	return kvm->arch.pause_in_guest;
}

static inline bool kvm_cstate_in_guest(struct kvm *kvm)
{
	return kvm->arch.cstate_in_guest;
}

DECLARE_PER_CPU(struct kvm_vcpu *, current_vcpu);

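/*
 * Record the vCPU being run on this CPU across host interrupt handling
 * (via the current_vcpu per-CPU variable declared above), which lets
 * interrupt-context code such as perf callbacks attribute the event to
 * the guest.
 */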
static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, vcpu);
}

static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, NULL);
}

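/*
 * Each byte of the PAT MSR encodes one memory type.  Bits 7:3 of every
 * byte must be clear, and types 2 and 3 are reserved; the OR below
 * changes the value exactly when a byte has bit 1 set but bit 2 clear,
 * i.e. when it encodes type 2 or 3.
 */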
static inline bool kvm_pat_valid(u64 data)
{
	if (data & 0xF8F8F8F8F8F8F8F8ull)
		return false;
	/* 0, 1, 4, 5, 6, 7 are valid values.  */
	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}

static inline bool kvm_dr7_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

static inline bool kvm_dr6_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
int kvm_spec_ctrl_test_value(u64 value);
int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);

/*
 * Internal error codes that are used to indicate that MSR emulation encountered
 * an error that should result in #GP in the guest, unless userspace
 * handles it.
 */
#define  KVM_MSR_RET_INVALID	2	/* in-kernel MSR emulation #GP condition */
#define  KVM_MSR_RET_FILTERED	3	/* #GP due to userspace MSR filter */

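/*
 * Build the CR4 reserved-bit mask for a given CPUID feature set: start
 * from the architecturally reserved bits and additionally reserve each
 * feature-controlled bit whose feature is not exposed.
 */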
#define __cr4_reserved_bits(__cpu_has, __c)             \
({                                                      \
	u64 __reserved_bits = CR4_RESERVED_BITS;        \
                                                        \
	if (!__cpu_has(__c, X86_FEATURE_XSAVE))         \
		__reserved_bits |= X86_CR4_OSXSAVE;     \
	if (!__cpu_has(__c, X86_FEATURE_SMEP))          \
		__reserved_bits |= X86_CR4_SMEP;        \
	if (!__cpu_has(__c, X86_FEATURE_SMAP))          \
		__reserved_bits |= X86_CR4_SMAP;        \
	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))      \
		__reserved_bits |= X86_CR4_FSGSBASE;    \
	if (!__cpu_has(__c, X86_FEATURE_PKU))           \
		__reserved_bits |= X86_CR4_PKE;         \
	if (!__cpu_has(__c, X86_FEATURE_LA57))          \
		__reserved_bits |= X86_CR4_LA57;        \
	if (!__cpu_has(__c, X86_FEATURE_UMIP))          \
		__reserved_bits |= X86_CR4_UMIP;        \
	if (!__cpu_has(__c, X86_FEATURE_VMX))           \
		__reserved_bits |= X86_CR4_VMXE;        \
	if (!__cpu_has(__c, X86_FEATURE_PCID))          \
		__reserved_bits |= X86_CR4_PCIDE;       \
	__reserved_bits;                                \
})

#endif