/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#include <linux/kvm_host.h>

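/*
 * CR0/CR4 bits that may be "guest owned", i.e. bits the guest can be
 * allowed to modify without a VM-exit.  Reads of these bits must go
 * through kvm_read_cr0_bits()/kvm_read_cr4_bits() so the cached value can
 * be refreshed from hardware when necessary.
 */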
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)

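/*
 * Generate trivial kvm_<reg>_read()/kvm_<reg>_write() accessors for the
 * general purpose registers.  These access vcpu->arch.regs[] directly and
 * do not check or update the regs_avail/regs_dirty bitmaps; callers that
 * need the lazy-caching behaviour can use kvm_register_read()/write().
 */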
#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				      \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{									      \
	return vcpu->arch.regs[VCPU_REGS_##uname];			      \
}									      \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	      \
						unsigned long val)	      \
{									      \
	vcpu->arch.regs[VCPU_REGS_##uname] = val;			      \
}
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
#ifdef CONFIG_X86_64
BUILD_KVM_GPR_ACCESSORS(r8,  R8)
BUILD_KVM_GPR_ACCESSORS(r9,  R9)
BUILD_KVM_GPR_ACCESSORS(r10, R10)
BUILD_KVM_GPR_ACCESSORS(r11, R11)
BUILD_KVM_GPR_ACCESSORS(r12, R12)
BUILD_KVM_GPR_ACCESSORS(r13, R13)
BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif

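/*
 * vcpu->arch.regs_avail tracks which cached register values are up to date
 * with respect to hardware (e.g. the VMCS on VMX), while regs_dirty tracks
 * cached values that KVM has modified and that must be propagated back to
 * hardware before the next VM-entry.
 */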
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
					     enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
					 enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
					   enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

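/*
 * Generic GPR accessors with lazy caching: if the requested register has
 * not been read from hardware since the last VM-exit, ask the vendor code
 * (kvm_x86_ops.cache_reg) to refresh the cache first.  Writes only update
 * the cache and mark the register dirty; the new value is flushed to
 * hardware later, before re-entering the guest.
 */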
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return 0;

	if (!kvm_register_is_available(vcpu, reg))
		kvm_x86_ops.cache_reg(vcpu, reg);

	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu, int reg,
				      unsigned long val)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return;

	vcpu->arch.regs[reg] = val;
	kvm_register_mark_dirty(vcpu, reg);
}

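/*
 * RIP and RSP are not unconditionally saved to vcpu->arch.regs[] on every
 * VM-exit (e.g. VMX reads them from the VMCS on demand), so they go
 * through the caching kvm_register_read()/kvm_register_write() helpers
 * rather than the direct GPR accessors.
 */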
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RSP, val);
}

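/*
 * Read a cached guest PDPTE (used when the guest runs with PAE paging).
 * Refreshing the cache may require reading guest memory, which can sleep
 * on SVM, hence the might_sleep() annotation.
 */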
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

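/*
 * Read CR0/CR4 bits, refreshing the cached value from hardware only when
 * at least one of the requested bits is guest owned and the cache is
 * stale.  Bits that are not guest owned are always valid in the cache,
 * since the guest cannot change them without KVM's involvement.
 */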
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR0);
	return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR4);
	return vcpu->arch.cr4 & mask;
}

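/*
 * CR3 is also cached lazily: the guest may have changed it without a
 * VM-exit (e.g. on VMX with EPT, where CR3 loads do not exit), so refresh
 * the cached value from hardware if it is stale.
 */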
static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR3);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

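/*
 * Combine EDX:EAX into a single 64-bit value, as consumed by instructions
 * such as WRMSR.  The inverse, splitting a 64-bit result back into the two
 * registers in an emulation path, would look roughly like this (a sketch;
 * "data" is a hypothetical u64 holding the value to return):
 *
 *	kvm_rax_write(vcpu, data & -1u);
 *	kvm_rdx_write(vcpu, (data >> 32) & -1u);
 */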
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_rax_read(vcpu) & -1u)
		| ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}

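/*
 * HF_GUEST_MASK tracks whether the vCPU is currently running a nested
 * guest (L2).  Updates to the EOI exit bitmap are deferred while in guest
 * mode, so leave_guest_mode() raises any pending KVM_REQ_LOAD_EOI_EXITMAP
 * request once the vCPU is back to running L1.
 */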
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

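/* True if the vCPU is currently in System Management Mode (SMM). */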
static inline bool is_smm(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_SMM_MASK;
}

#endif