xref: /OK3568_Linux_fs/kernel/arch/arm64/kvm/hyp/vhe/switch.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/adjust_pc.h>
#include <hyp/switch.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/vectors.h>

const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

/* VHE specific context */
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);

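/*
 * Note on the per-CPU state above, inferred from how it is used below:
 * kvm_host_data holds the host context saved across a guest run,
 * kvm_hyp_ctxt is the scratch register context used by the low-level
 * __guest_enter() path, and kvm_hyp_vector caches the (possibly
 * Spectre-hardened) vector base that __activate_traps() installs in
 * VBAR_EL1 while a guest runs.
 */
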
static void __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	___activate_traps(vcpu);

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
	val &= ~CPACR_EL1_ZEN;

	/*
	 * With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
	 * CPTR_EL2. In general, CPACR_EL1 has the same layout as CPTR_EL2,
	 * except for some missing controls, such as TAM.
	 * In this case, CPTR_EL2.TAM has the same bit position with or
	 * without VHE (HCR.E2H == 1), which allows us to use the CPTR_EL2.TAM
	 * shift value here for trapping AMU accesses.
	 */

	val |= CPTR_EL2_TAM;

	if (update_fp_enabled(vcpu)) {
		if (vcpu_has_sve(vcpu))
			val |= CPACR_EL1_ZEN;
	} else {
		val &= ~CPACR_EL1_FPEN;
		__activate_traps_fpsimd32(vcpu);
	}

	write_sysreg(val, cpacr_el1);

	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el1);
}
NOKPROBE_SYMBOL(__activate_traps);

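/*
 * For reference, the CPTR_EL2 layout under VHE (E2H == 1), per the ARM ARM:
 * TTA is bit 28, FPEN is bits [21:20], ZEN is bits [17:16], and TAM is
 * bit 30 (the same position as in the non-VHE layout). FPEN and ZEN only
 * disable trapping when set to 0b11, so clearing ZEN above (and FPEN when
 * the FP state is not loaded) makes the guest's first FP/SVE access trap
 * to EL2, where the exit fixup path can load the guest FP state lazily.
 */
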
static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
	const char *host_vectors = vectors;

	___deactivate_traps(vcpu);

	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
	 * above before we can switch to the EL2/EL0 translation regime used by
	 * the host.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));

	write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);

	if (!arm64_kernel_unmapped_at_el0())
		host_vectors = __this_cpu_read(this_cpu_vector);
	write_sysreg(host_vectors, vbar_el1);
}
NOKPROBE_SYMBOL(__deactivate_traps);

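/*
 * On the vector selection above: this_cpu_vector may point at one of the
 * Spectre-BHB hardened EL1 vector slots, so it is preferred when available.
 * When the kernel is unmapped at EL0 (KPTI), the fixed `vectors` base is
 * used instead, since in that configuration any mitigation lives in the
 * EL0 trampoline vectors rather than in a per-CPU slot.
 */
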
void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
{
	__activate_traps_common(vcpu);
}

void deactivate_traps_vhe_put(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	/* Keep the HPMN, E2PB and TPMS fields; clear every other control. */
	mdcr_el2 &= MDCR_EL2_HPMN_MASK |
		    MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
		    MDCR_EL2_TPMS;

	write_sysreg(mdcr_el2, mdcr_el2);

	__deactivate_traps_common();
}

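/*
 * For reference (MDCR_EL2 in the ARM ARM): HPMN partitions the PMU event
 * counters between host and guest, E2PB controls ownership of the SPE
 * profiling buffer, and TPMS traps SPE system register accesses. Debug
 * and PMU trap bits such as TPM, TDA and TDE are among those cleared on
 * put.
 */
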
/* Switch to the guest for VHE systems running in EL2 */
static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	u64 exit_code;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	sysreg_save_host_state_vhe(host_ctxt);

	/*
	 * ARM erratum 1165522 requires us to configure both stage 1 and
	 * stage 2 translation for the guest context before we clear
	 * HCR_EL2.TGE.
	 *
	 * We have already configured the guest's stage 1 translation in
	 * kvm_vcpu_load_sysregs_vhe above.  We must now call
	 * __load_guest_stage2 before __activate_traps, because
	 * __load_guest_stage2 configures stage 2 translation, and
	 * __activate_traps clears HCR_EL2.TGE (among other things).
	 */
	__load_guest_stage2(vcpu->arch.hw_mmu);
	__activate_traps(vcpu);

	__adjust_pc(vcpu);

	sysreg_restore_guest_state_vhe(guest_ctxt);
	__debug_switch_to_guest(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	sysreg_save_guest_state_vhe(guest_ctxt);

	__deactivate_traps(vcpu);

	sysreg_restore_host_state_vhe(host_ctxt);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

	__debug_switch_to_host(vcpu);

	return exit_code;
}
NOKPROBE_SYMBOL(__kvm_vcpu_run_vhe);

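/*
 * The ordering in __kvm_vcpu_run_vhe() is the load-bearing part: host
 * sysregs are saved before stage 2 and the traps are switched in, and
 * they are restored only after __deactivate_traps() has reinstated
 * HCR_EL2 and CPACR_EL1, so every sysreg access in between is either
 * context-switched or trapped. The do/while loop re-enters the guest as
 * long as fixup_guest_exit() can handle the exit entirely at EL2 (e.g. a
 * lazy FP/SIMD trap) without returning to the host run loop.
 */
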
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	int ret;

	local_daif_mask();

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal the CPU of interrupts of lower priority, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 *
	 * local_daif_mask() already sets GIC_PRIO_PSR_I_SET; we just need a
	 * dsb to ensure that the redistributor forwards EL2 IRQs to the CPU.
	 */
	pmr_sync();

	ret = __kvm_vcpu_run_vhe(vcpu);

	/*
	 * local_daif_restore() takes care to properly restore PSTATE.DAIF
	 * and the GIC PMR if the host is using IRQ priorities.
	 */
	local_daif_restore(DAIF_PROCCTX_NOIRQ);

	/*
	 * When we exit from the guest we change a number of CPU configuration
	 * parameters, such as traps.  Make sure these changes take effect
	 * before running the host or additional guests.
	 */
	isb();

	return ret;
}

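/*
 * On pmr_sync(): when the GIC requires it (the gic_pmr_sync static key in
 * asm/barrier.h), it expands to a dsb(sy), which guarantees that the
 * ICC_PMR_EL1 write performed by local_daif_mask() has reached the
 * redistributor before the guest is entered; otherwise it is a no-op.
 */
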
static void __hyp_call_panic(u64 spsr, u64 elr, u64 par)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	vcpu = host_ctxt->__hyp_running_vcpu;

	__deactivate_traps(vcpu);
	sysreg_restore_host_state_vhe(host_ctxt);

	panic(__hyp_panic_string,
	      spsr, elr,
	      read_sysreg_el2(SYS_ESR), read_sysreg_el2(SYS_FAR),
	      read_sysreg(hpfar_el2), par, vcpu);
}
NOKPROBE_SYMBOL(__hyp_call_panic);

void __noreturn hyp_panic(void)
{
	u64 spsr = read_sysreg_el2(SYS_SPSR);
	u64 elr = read_sysreg_el2(SYS_ELR);
	u64 par = read_sysreg_par();

	__hyp_call_panic(spsr, elr, par);
	unreachable();
}

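/*
 * hyp_panic() snapshots SPSR_EL2, ELR_EL2 and PAR_EL1 before
 * __hyp_call_panic() tears the traps down, since deactivating traps and
 * restoring host state can clobber the registers describing the original
 * failure. panic() never returns; unreachable() makes that explicit so
 * the __noreturn annotation holds.
 */
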
asmlinkage void kvm_unexpected_el2_exception(void)
{
	__kvm_unexpected_el2_exception();
}