// SPDX-License-Identifier: GPL-2.0-only
/*
 * Debug and Guest Debug support
 *
 * Copyright (C) 2015 - Linaro Ltd
 * Author: Alex Bennée <alex.bennee@linaro.org>
 */

#include <linux/kvm_host.h>
#include <linux/hw_breakpoint.h>

#include <asm/debug-monitors.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

/* These are the bits of MDSCR_EL1 we may manipulate */
#define MDSCR_EL1_DEBUG_MASK	(DBG_MDSCR_SS | \
				DBG_MDSCR_KDE | \
				DBG_MDSCR_MDE)

static DEFINE_PER_CPU(u32, mdcr_el2);
/**
 * save/restore_guest_debug_regs
 *
 * For some debug operations we need to tweak some guest registers. As
 * a result we need to save the state of those registers before we
 * make those modifications.
 *
 * Guest access to MDSCR_EL1 is trapped by the hypervisor and handled
 * after we have restored the preserved value to the main context.
 */
static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
{
	u64 val = vcpu_read_sys_reg(vcpu, MDSCR_EL1);

	vcpu->arch.guest_debug_preserved.mdscr_el1 = val;

	trace_kvm_arm_set_dreg32("Saved MDSCR_EL1",
				vcpu->arch.guest_debug_preserved.mdscr_el1);
}

static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
{
	u64 val = vcpu->arch.guest_debug_preserved.mdscr_el1;

	vcpu_write_sys_reg(vcpu, val, MDSCR_EL1);

	trace_kvm_arm_set_dreg32("Restored MDSCR_EL1",
				vcpu_read_sys_reg(vcpu, MDSCR_EL1));
}
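
/*
 * Note: the save/restore pair above brackets guest entry while
 * userspace debug is in effect: kvm_arm_setup_debug() below saves the
 * guest's MDSCR_EL1 before modifying it, and kvm_arm_clear_debug()
 * restores it on exit.
 */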

/**
 * kvm_arm_init_debug - grab what we need for debug
 *
 * Currently the sole task of this function is to retrieve the initial
 * value of mdcr_el2 so we can preserve MDCR_EL2.HPMN which has
 * presumably been set-up by some knowledgeable bootcode.
 *
 * It is called once per-cpu during CPU hyp initialisation.
 */

void kvm_arm_init_debug(void)
{
	__this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
}

/**
 * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
 *
 * @vcpu:	the vcpu pointer
 *
 * This ensures we will trap access to:
 *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
 *  - Debug ROM Address (MDCR_EL2_TDRA)
 *  - OS related registers (MDCR_EL2_TDOSA)
 *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
 *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
 */
static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
{
	/*
	 * This also clears MDCR_EL2_E2PB_MASK to disable guest access
	 * to the profiling buffer.
	 */
	vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
				MDCR_EL2_TPMS |
				MDCR_EL2_TTRF |
				MDCR_EL2_TPMCR |
				MDCR_EL2_TDRA |
				MDCR_EL2_TDOSA);

	/* Is the VM being debugged by userspace? */
	if (vcpu->guest_debug)
		/* Route all software debug exceptions to EL2 */
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;

	/*
	 * Trap debug register access when one of the following is true:
	 *  - Userspace is using the hardware to debug the guest
	 *    (KVM_GUESTDBG_USE_HW is set).
	 *  - The guest is not using debug (KVM_ARM64_DEBUG_DIRTY is clear).
	 */
	if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
	    !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;

	trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
}
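
/*
 * For context: vcpu->guest_debug is populated from userspace via the
 * KVM_SET_GUEST_DEBUG vcpu ioctl. An illustrative userspace sketch
 * (vcpu_fd is a hypothetical vcpu file descriptor):
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 */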

/**
 * kvm_arm_vcpu_init_debug - setup vcpu debug traps
 *
 * @vcpu:	the vcpu pointer
 *
 * Set vcpu initial mdcr_el2 value.
 */
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
{
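	/*
	 * Disable preemption so the per-CPU mdcr_el2 value read in
	 * kvm_arm_setup_mdcr_el2() belongs to the CPU we stay on.
	 */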
	preempt_disable();
	kvm_arm_setup_mdcr_el2(vcpu);
	preempt_enable();
}

/**
 * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
 *
 * @vcpu:	the vcpu pointer
 */

void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
}

/**
 * kvm_arm_setup_debug - set up debug related stuff
 *
 * @vcpu:	the vcpu pointer
 *
 * This is called before each entry into the hypervisor to setup any
 * debug related registers. Currently this just ensures we will trap
 * access to:
 *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
 *  - Debug ROM Address (MDCR_EL2_TDRA)
 *  - OS related registers (MDCR_EL2_TDOSA)
 *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
 *  - Self-hosted Trace (MDCR_EL2_TTRF/MDCR_EL2_E2TB)
 *
 * Additionally, KVM only traps guest accesses to the debug registers if
 * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
 * flag on vcpu->arch.flags).  Since the guest must not interfere
 * with the hardware state when debugging the guest, we must ensure that
 * trapping is enabled whenever we are debugging the guest using the
 * debug registers.
 */

void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
{
	unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;

	trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);

	kvm_arm_setup_mdcr_el2(vcpu);

	/* Is Guest debugging in effect? */
	if (vcpu->guest_debug) {
		/* Save guest debug state */
		save_guest_debug_regs(vcpu);

		/*
		 * Single Step (ARM ARM D2.12.3 The software step state
		 * machine)
		 *
		 * If we are doing Single Step we need to manipulate
		 * the guest's MDSCR_EL1.SS and PSTATE.SS. Once the
		 * step has occurred the hypervisor will trap the
		 * debug exception and we return to userspace.
		 *
		 * If the guest attempts to single step its userspace
		 * we would have to deal with a trapped exception
		 * while in the guest kernel. Because this would be
		 * hard to unwind we suppress the guest's ability to
		 * do so by masking MDSCR_EL1.SS.
		 *
		 * This confuses guest debuggers which use
		 * single-step behind the scenes but everything
		 * returns to normal once the host is no longer
		 * debugging the system.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			*vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr |= DBG_MDSCR_SS;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
		} else {
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr &= ~DBG_MDSCR_SS;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);
		}

		trace_kvm_arm_set_dreg32("SPSR_EL2", *vcpu_cpsr(vcpu));

		/*
		 * HW Breakpoints and watchpoints
		 *
		 * We simply switch the debug_ptr to point to our new
		 * external_debug_state which has been populated by the
		 * debug ioctl. The existing KVM_ARM64_DEBUG_DIRTY
		 * mechanism ensures the registers are updated on the
		 * world switch.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			/* Enable breakpoints/watchpoints */
			mdscr = vcpu_read_sys_reg(vcpu, MDSCR_EL1);
			mdscr |= DBG_MDSCR_MDE;
			vcpu_write_sys_reg(vcpu, mdscr, MDSCR_EL1);

			vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
			vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;

			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
						&vcpu->arch.debug_ptr->dbg_bcr[0],
						&vcpu->arch.debug_ptr->dbg_bvr[0]);

			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
						&vcpu->arch.debug_ptr->dbg_wcr[0],
						&vcpu->arch.debug_ptr->dbg_wvr[0]);
		}
	}

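	/*
	 * When userspace debug is not in effect, debug_ptr must still
	 * point at the vcpu's own debug state; anything else means we
	 * failed to restore it in kvm_arm_clear_debug().
	 */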
	BUG_ON(!vcpu->guest_debug &&
		vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);

	/* If KDE or MDE are set, perform a full save/restore cycle. */
	if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;

	/* Write mdcr_el2 changes since vcpu_load on VHE systems */
	if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
		write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);

	trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
}

void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
{
	trace_kvm_arm_clear_debug(vcpu->guest_debug);

	if (vcpu->guest_debug) {
		restore_guest_debug_regs(vcpu);

		/*
		 * If we were using HW debug we need to restore the
		 * debug_ptr to the guest debug state.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			kvm_arm_reset_debug_ptr(vcpu);

			trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
						&vcpu->arch.debug_ptr->dbg_bcr[0],
						&vcpu->arch.debug_ptr->dbg_bvr[0]);

			trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
						&vcpu->arch.debug_ptr->dbg_wcr[0],
						&vcpu->arch.debug_ptr->dbg_wvr[0]);
		}
	}
}

void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
{
	u64 dfr0;

	/* For VHE, there is nothing to do */
	if (has_vhe())
		return;

	dfr0 = read_sysreg(id_aa64dfr0_el1);
	/*
	 * If SPE is present on this CPU and is available at current EL,
	 * we may need to check if the host state needs to be saved.
	 */
	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVER_SHIFT) &&
	    !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT)))
		vcpu->arch.flags |= KVM_ARM64_DEBUG_STATE_SAVE_SPE;

	/* Check if we have TRBE implemented and available at the host */
	if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRBE_SHIFT) &&
	    !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG))
		vcpu->arch.flags |= KVM_ARM64_DEBUG_STATE_SAVE_TRBE;
}

void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu)
{
	vcpu->arch.flags &= ~(KVM_ARM64_DEBUG_STATE_SAVE_SPE |
			      KVM_ARM64_DEBUG_STATE_SAVE_TRBE);
}