/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include <asm/cpu.h>
#include <asm/processor.h>
#include <uapi/asm/kvm_para.h>

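/*
 * KVM's capability masks, indexed by feature word.  Each word mirrors a
 * hardware-defined CPUID output register and holds the features KVM is
 * able and willing to expose to guests; kvm_set_cpu_caps() populates the
 * array during KVM setup.
 */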
extern u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
void kvm_set_cpu_caps(void);

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
                                              u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
                            struct kvm_cpuid_entry2 __user *entries,
                            unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                             struct kvm_cpuid *cpuid,
                             struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
               u32 *ecx, u32 *edx, bool exact_only);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);

static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.maxphyaddr;
}

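/*
 * A guest physical address is illegal if it has any bits set above the
 * guest's advertised MAXPHYADDR.
 */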
static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        return (gpa >= BIT_ULL(cpuid_maxphyaddr(vcpu)));
}

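/*
 * Maps a feature word, e.g. CPUID_7_0_EBX, back to the CPUID function,
 * index (ECX input) and output register that define it in hardware.
 */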
struct cpuid_reg {
        u32 function;
        u32 index;
        int reg;
};

static const struct cpuid_reg reverse_cpuid[] = {
        [CPUID_1_EDX]         = {         1, 0, CPUID_EDX},
        [CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
        [CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
        [CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
        [CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
        [CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
        [CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
        [CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
        [CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
        [CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
        [CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
        [CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
        [CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
        [CPUID_7_EDX]         = {         7, 0, CPUID_EDX},
        [CPUID_7_1_EAX]       = {         7, 1, CPUID_EAX},
};

/*
 * Reverse CPUID and its derivatives can only be used for hardware-defined
 * feature words, i.e. words whose bits directly correspond to a CPUID leaf.
 * Retrieving a feature bit or masking guest CPUID from a Linux-defined word
 * is nonsensical as the bit number/mask is an arbitrary software-defined
 * value and can't be used by KVM to query/control guest capabilities.  And
 * obviously the leaf being queried must have an entry in the lookup table.
 */
static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
{
        BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
        BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
        BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
        BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
        BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
        BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
}

/*
 * Retrieve the bit mask from an X86_FEATURE_* definition.  Features contain
 * the hardware defined bit number (stored in bits 4:0) and a software
 * defined "word" (stored in bits 31:5).  The word is used to index into
 * arrays of bit masks that hold the per-cpu feature capabilities, e.g.
 * this_cpu_has().
 */
static __always_inline u32 __feature_bit(int x86_feature)
{
        reverse_cpuid_check(x86_feature / 32);
        return 1 << (x86_feature & 31);
}
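
/*
 * For example, X86_FEATURE_XSAVES is defined as word 10, bit 3, i.e.
 * 10 * 32 + 3.  __feature_bit() yields the mask BIT(3), and word 10
 * (CPUID_D_1_EAX) reverse-maps to CPUID.0xD.1:EAX via reverse_cpuid[].
 * (Illustrative; the exact encoding lives in <asm/cpufeatures.h>.)
 */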

#define feature_bit(name) __feature_bit(X86_FEATURE_##name)

static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_feature)
{
        unsigned int x86_leaf = x86_feature / 32;

        reverse_cpuid_check(x86_leaf);
        return reverse_cpuid[x86_leaf];
}

static __always_inline u32 *__cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
                                                  u32 reg)
{
        switch (reg) {
        case CPUID_EAX:
                return &entry->eax;
        case CPUID_EBX:
                return &entry->ebx;
        case CPUID_ECX:
                return &entry->ecx;
        case CPUID_EDX:
                return &entry->edx;
        default:
                BUILD_BUG();
                return NULL;
        }
}

static __always_inline u32 *cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
                                                unsigned int x86_feature)
{
        const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);

        return __cpuid_entry_get_reg(entry, cpuid.reg);
}

static __always_inline u32 cpuid_entry_get(struct kvm_cpuid_entry2 *entry,
                                           unsigned int x86_feature)
{
        u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

        return *reg & __feature_bit(x86_feature);
}

static __always_inline bool cpuid_entry_has(struct kvm_cpuid_entry2 *entry,
                                            unsigned int x86_feature)
{
        return cpuid_entry_get(entry, x86_feature);
}

static __always_inline void cpuid_entry_clear(struct kvm_cpuid_entry2 *entry,
                                              unsigned int x86_feature)
{
        u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

        *reg &= ~__feature_bit(x86_feature);
}

static __always_inline void cpuid_entry_set(struct kvm_cpuid_entry2 *entry,
                                            unsigned int x86_feature)
{
        u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

        *reg |= __feature_bit(x86_feature);
}

static __always_inline void cpuid_entry_change(struct kvm_cpuid_entry2 *entry,
                                               unsigned int x86_feature,
                                               bool set)
{
        u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

        /*
         * Open coded instead of using cpuid_entry_{clear,set}() to coerce
         * the compiler into using CMOV instead of Jcc when possible.
         */
        if (set)
                *reg |= __feature_bit(x86_feature);
        else
                *reg &= ~__feature_bit(x86_feature);
}
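
/*
 * Typical usage, sketched: look up the guest's entry for a leaf and flip a
 * feature bit in it.  Illustrative only, not a snippet from KVM proper;
 * "enable_umip" is a hypothetical flag:
 *
 *      struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 7, 0);
 *
 *      if (best)
 *              cpuid_entry_change(best, X86_FEATURE_UMIP, enable_umip);
 */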
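/*
 * Replace the entire output register for a feature word with KVM's
 * capability mask for that word.  Passing "leaf * 32", i.e. bit 0 of the
 * word, lets cpuid_entry_get_reg() resolve the register via reverse_cpuid.
 */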
static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
                                                 enum cpuid_leafs leaf)
{
        u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);

        BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
        *reg = kvm_cpu_caps[leaf];
}

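/*
 * The guest_cpuid_*() helpers below operate on the vCPU's CPUID entries as
 * set by userspace, as opposed to KVM's capabilities in kvm_cpu_caps.
 */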
static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
                                                     unsigned int x86_feature)
{
        const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
        struct kvm_cpuid_entry2 *entry;

        entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
        if (!entry)
                return NULL;

        return __cpuid_entry_get_reg(entry, cpuid.reg);
}

static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
                                            unsigned int x86_feature)
{
        u32 *reg;

        reg = guest_cpuid_get_register(vcpu, x86_feature);
        if (!reg)
                return false;

        return *reg & __feature_bit(x86_feature);
}

static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
                                              unsigned int x86_feature)
{
        u32 *reg;

        reg = guest_cpuid_get_register(vcpu, x86_feature);
        if (reg)
                *reg &= ~__feature_bit(x86_feature);
}

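/*
 * The vendor string, e.g. "AuthenticAMD" or "HygonGenuine", is spread
 * across EBX, EDX and ECX of CPUID function 0.
 */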
static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0, 0);
        return best &&
               (is_guest_vendor_amd(best->ebx, best->ecx, best->edx) ||
                is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
}

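/*
 * Family, model and stepping are encoded in EAX of CPUID function 1; the
 * helpers below return -1 if the guest's CPUID lacks that leaf.
 */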
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
        if (!best)
                return -1;

        return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
        if (!best)
                return -1;

        return x86_model(best->eax);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
        if (!best)
                return -1;

        return x86_stepping(best->eax);
}

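/*
 * The guest can access MSR_IA32_SPEC_CTRL (or MSR_IA32_PRED_CMD below) if
 * any of the features that enumerate the MSR is exposed in its CPUID.
 */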
static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
{
        return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
                guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
                guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
                guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
}

static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
{
        return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
                guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB));
}

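/*
 * CPUID faulting: supports_cpuid_fault() checks whether the platform
 * advertises the capability via MSR_PLATFORM_INFO, cpuid_fault_enabled()
 * whether the guest has actually turned it on, in which case CPUID
 * executed at CPL > 0 must #GP.
 */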
static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}

static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.msr_misc_features_enables &
               MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}

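/*
 * Accessors for kvm_cpu_caps; these mirror the cpuid_entry_*() helpers
 * above but operate on KVM's own capability masks.
 */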
static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
{
        unsigned int x86_leaf = x86_feature / 32;

        reverse_cpuid_check(x86_leaf);
        kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
}

static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
{
        unsigned int x86_leaf = x86_feature / 32;

        reverse_cpuid_check(x86_leaf);
        kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
}

static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
{
        unsigned int x86_leaf = x86_feature / 32;

        reverse_cpuid_check(x86_leaf);
        return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
}

static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
{
        return !!kvm_cpu_cap_get(x86_feature);
}

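/* Set a KVM capability if and only if the host CPU supports the feature. */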
static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
{
        if (boot_cpu_has(x86_feature))
                kvm_cpu_cap_set(x86_feature);
}

static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
}

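/*
 * Paravirt features (KVM_FEATURE_*) are checked against the guest's CPUID
 * only if userspace opted in to enforcement, i.e. enabled
 * KVM_CAP_ENFORCE_PV_FEATURE_CPUID; otherwise all PV features are assumed
 * available.
 */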
static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
                                         unsigned int kvm_feature)
{
        if (!vcpu->arch.pv_cpuid.enforce)
                return true;

        return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
}

#endif