// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/handle_exit.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/debug-monitors.h>
#include <asm/traps.h>

#include <kvm/arm_hypercalls.h>

#define CREATE_TRACE_POINTS
#include "trace_handle_exit.h"

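/*
 * Exit handlers follow the same convention as handle_exit() below:
 * return > 0 to resume the guest, 0 to exit to userspace (with
 * run->exit_reason set), and < 0 on error.
 */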
typedef int (*exit_handle_fn)(struct kvm_vcpu *);

static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
{
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
		kvm_inject_vabt(vcpu);
}

static int handle_hvc(struct kvm_vcpu *vcpu)
{
	int ret;

	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
			    kvm_vcpu_hvc_get_imm(vcpu));
	vcpu->stat.hvc_exit_stat++;

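	/*
	 * A negative return means the hypercall was not handled (e.g. an
	 * unknown function ID); report "not supported" (-1) in x0 and
	 * resume the guest.
	 */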
	ret = kvm_hvc_call_handler(vcpu);
	if (ret < 0) {
		vcpu_set_reg(vcpu, 0, ~0UL);
		return 1;
	}

	return ret;
}

static int handle_smc(struct kvm_vcpu *vcpu)
{
	/*
	 * "If an SMC instruction executed at Non-secure EL1 is
	 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
	 * Trap exception, not a Secure Monitor Call exception [...]"
	 *
	 * We need to advance the PC after the trap, as it would
	 * otherwise return to the same address...
	 */
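	/*
	 * The SMC is not forwarded to EL3: tell the guest the call is not
	 * supported (x0 = -1) and skip over the instruction.
	 */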
	vcpu_set_reg(vcpu, 0, ~0UL);
	kvm_incr_pc(vcpu);
	return 1;
}

/*
 * Guest access to FP/ASIMD registers is routed to this handler only
 * when the system doesn't support FP/ASIMD.
 */
static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/**
 * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
 *		    instruction executed by a guest
 *
 * @vcpu:	the vcpu pointer
 *
 * WFE: Yield the CPU and come back to this vcpu when the scheduler
 * decides to.
 * WFI: Simply call kvm_vcpu_block(), which will halt execution of
 * world-switches and schedule other host processes until there is an
 * incoming IRQ or FIQ to the VM.
 */
static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
		vcpu->stat.wfe_exit_stat++;
		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
	} else {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
		vcpu->stat.wfi_exit_stat++;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
	}

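	/*
	 * The trap leaves the PC pointing at the WFI/WFE instruction
	 * itself; skip it before returning to the guest.
	 */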
	kvm_incr_pc(vcpu);

	return 1;
}

/**
 * kvm_handle_guest_debug - handle a debug exception instruction
 *
 * @vcpu:	the vcpu pointer
 *
 * We route all debug exceptions through the same handler. If both the
 * guest and host are using the same debug facilities it will be up to
 * userspace to re-inject the correct exception for guest delivery.
 *
 * @return: 0 (while setting vcpu->run->exit_reason), -1 for error
 */
static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 esr = kvm_vcpu_get_esr(vcpu);
	int ret = 0;

	run->exit_reason = KVM_EXIT_DEBUG;
	run->debug.arch.hsr = esr;

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_WATCHPT_LOW:
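		/* Watchpoints also report the faulting data address. */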
		run->debug.arch.far = vcpu->arch.fault.far_el2;
		fallthrough;
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_BKPT32:
	case ESR_ELx_EC_BRK64:
		break;
	default:
		kvm_err("%s: un-handled case esr: %#08x\n",
			__func__, (unsigned int) esr);
		ret = -1;
		break;
	}

	return ret;
}

static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	kvm_pr_unimpl("Unknown exception class: esr: %#08x -- %s\n",
		      esr, esr_get_class_string(esr));

	kvm_inject_undefined(vcpu);
	return 1;
}

static int handle_sve(struct kvm_vcpu *vcpu)
{
	/* Until SVE is supported for guests: */
	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
 * a NOP). If we get here, it means we didn't fix up ptrauth on exit, and all
 * we can do is give the guest an UNDEF.
 */
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

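/*
 * Exit handler dispatch table, indexed by the ESR_ELx exception class.
 * The range designator pre-fills every entry with kvm_handle_unknown_ec,
 * so any class without a dedicated handler injects an UNDEF into the guest.
 */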
static exit_handle_fn arm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
	[ESR_ELx_EC_CP15_32]	= kvm_handle_cp15_32,
	[ESR_ELx_EC_CP15_64]	= kvm_handle_cp15_64,
	[ESR_ELx_EC_CP14_MR]	= kvm_handle_cp14_32,
	[ESR_ELx_EC_CP14_LS]	= kvm_handle_cp14_load_store,
	[ESR_ELx_EC_CP14_64]	= kvm_handle_cp14_64,
	[ESR_ELx_EC_HVC32]	= handle_hvc,
	[ESR_ELx_EC_SMC32]	= handle_smc,
	[ESR_ELx_EC_HVC64]	= handle_hvc,
	[ESR_ELx_EC_SMC64]	= handle_smc,
	[ESR_ELx_EC_SYS64]	= kvm_handle_sys_reg,
	[ESR_ELx_EC_SVE]	= handle_sve,
	[ESR_ELx_EC_IABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_DABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BKPT32]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_BRK64]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_FP_ASIMD]	= handle_no_fpsimd,
	[ESR_ELx_EC_PAC]	= kvm_handle_ptrauth,
};

static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);
	u8 esr_ec = ESR_ELx_EC(esr);

	return arm_exit_handlers[esr_ec];
}

/*
 * We may be single-stepping an emulated instruction. If the emulation
 * has been completed in the kernel, we can return to userspace with a
 * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
 * emulation first.
 */
static int handle_trap_exceptions(struct kvm_vcpu *vcpu)
{
	int handled;

	/*
	 * See ARM ARM B1.14.1: "Hyp traps on instructions
	 * that fail their condition code check"
	 */
	if (!kvm_condition_valid(vcpu)) {
		kvm_incr_pc(vcpu);
		handled = 1;
	} else {
		exit_handle_fn exit_handler;

		exit_handler = kvm_get_exit_handler(vcpu);
		handled = exit_handler(vcpu);
	}

	return handled;
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
{
	struct kvm_run *run = vcpu->run;

	if (ARM_SERROR_PENDING(exception_index)) {
		/*
		 * The SError is handled by handle_exit_early(). If the guest
		 * survives it will re-execute the original instruction.
		 */
		return 1;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	switch (exception_index) {
	case ARM_EXCEPTION_IRQ:
		return 1;
	case ARM_EXCEPTION_EL1_SERROR:
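		/*
		 * The SError itself was dealt with in handle_exit_early();
		 * nothing more to do here, re-enter the guest.
		 */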
		return 1;
	case ARM_EXCEPTION_TRAP:
		return handle_trap_exceptions(vcpu);
	case ARM_EXCEPTION_HYP_GONE:
		/*
		 * EL2 has been reset to the hyp-stub. This happens when a guest
		 * is pre-empted by kvm_reboot()'s shutdown call.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return 0;
	case ARM_EXCEPTION_IL:
		/*
		 * We attempted an illegal exception return. Guest state must
		 * have been corrupted somehow. Give up.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return -EINVAL;
	default:
		kvm_pr_unimpl("Unsupported exception type: %d",
			      exception_index);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return 0;
	}
}
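
/*
 * For reference, a rough sketch of how the vcpu run loop (in
 * arch/arm64/kvm/arm.c) consumes the value returned above -- names and
 * details abridged:
 *
 *	while (ret > 0) {
 *		...
 *		ret = kvm_arm_vcpu_enter_exit(vcpu);	// world switch
 *		...
 *		handle_exit_early(vcpu, ret);		// before preemption
 *		preempt_enable();
 *		ret = handle_exit(vcpu, ret);
 *	}
 *	// ret == 0: exit to userspace, run->exit_reason already set
 *	// ret < 0:  error propagated back to the KVM_RUN ioctl
 */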

/* For exit types that need handling before we can be preempted */
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
{
	if (ARM_SERROR_PENDING(exception_index)) {
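		/*
		 * With the RAS extension the SError syndrome is available
		 * (see kvm_vcpu_get_disr()) and can be classified; without
		 * RAS we cannot tell how severe it was, so a virtual abort
		 * is always injected.
		 */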
		if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
			u64 disr = kvm_vcpu_get_disr(vcpu);

			kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
		} else {
			kvm_inject_vabt(vcpu);
		}

		return;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	if (exception_index == ARM_EXCEPTION_EL1_SERROR)
		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
}