/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};
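
/*
 * Note: the vector offsets above combine with exception_type when KVM
 * injects an exception into a guest: the target is, roughly,
 * VBAR_EL1 + <taken-from offset> + <type>. For example, a synchronous
 * exception taken from a lower exception level running AArch64 vectors
 * to VBAR_EL1 + LOWER_EL_AArch64_VECTOR + except_type_sync, i.e.
 * VBAR_EL1 + 0x400.
 */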

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);

static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		vcpu->arch.hcr_el2 |= HCR_FWB;
	} else {
		/*
		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
		 * get set in SCTLR_EL1 such that we can detect when the guest
		 * MMU gets turned on and do the necessary cache maintenance
		 * then.
		 */
		vcpu->arch.hcr_el2 |= HCR_TVM;
	}

	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	/*
	 * TID3: trap feature register accesses that we virtualise.
	 * For now this is conditional, since no AArch32 feature regs
	 * are currently virtualised.
	 */
	if (!vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID3;

	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
	    vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID2;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
						  u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
					 unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}
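
/*
 * Usage sketch (hypothetical): an MMIO load emulation path would decode
 * the destination register from ESR_EL2 and complete the access with
 *
 *	vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
 *
 * relying on the XZR (reg 31) special-casing above.
 */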

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------|
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	/* Read DIT from the AArch64 view (bit 24, per the table above) */
	unsigned long dit = !!(spsr & PSR_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}
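
/*
 * Worked example: an AArch64 view of 0x010001d7 (DIT at bit 24, A/I/F
 * set, AArch32 ABT mode) converts to 0x002001d7: bits 24 and 21 are
 * cleared and DIT is re-inserted at its AArch32 position, bit 21.
 */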

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

/* Returns the 4-bit condition code from ESR_EL2, or -1 if it is not valid */
static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}
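
/*
 * Background: HPFAR_EL2.FIPA holds bits [47:12] of the faulting IPA,
 * starting at register bit 4, so the masked value above is FIPA << 4 and
 * the additional shift by 8 yields the page-aligned IPA (FIPA << 12).
 * Example: HPFAR_EL2 == 0x8f630 gives an IPA of 0x8f63000.
 */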

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
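
/*
 * Note: the ESR_ELx.SAS field encodes the access size as log2(bytes), so
 * kvm_vcpu_dabt_get_as() maps 0/1/2/3 to 1/2/4/8 bytes respectively.
 */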

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
}

static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu))
		return true;

	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}
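
/*
 * Rationale (sketch): a stage-1 page-table walk that faults at stage 2
 * is treated as a write so that updates performed by the guest's walker
 * (e.g. hardware Access flag/dirty state) can succeed, and the S1PTW
 * check must come first because WNR has a different meaning when S1PTW
 * is set.
 */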

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);

		sctlr |= (1 << 25);	/* SCTLR_EL1.EE: EL1 data endianness */
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));	/* SCTLR_EL1.EE */
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* Leave LE untouched */
}
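
/*
 * Usage sketch (hypothetical, with "word" standing in for the value read
 * by the host): completing an emulated 4-byte MMIO load might look like
 *
 *	unsigned long data = vcpu_data_host_to_guest(vcpu, word, 4);
 *	vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
 *
 * so that a big-endian guest observes the value with the byte order it
 * expects.
 */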

/*
 * Ask for the PC to be advanced past the trapped instruction; the
 * adjustment is applied before the vcpu next enters the guest.
 */
static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
	vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
}

static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
{
	return test_bit(feature, vcpu->arch.features);
}

#endif /* __ARM64_KVM_EMULATE_H__ */