/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/thread_info.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_USER_MEM_SLOTS 512
#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)

#define KVM_DIRTY_LOG_MANUAL_CAPS	(KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
					 KVM_DIRTY_LOG_INITIALLY_SET)

/*
 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.
 */
enum kvm_mode {
	KVM_MODE_DEFAULT,
	KVM_MODE_PROTECTED,
};
enum kvm_mode kvm_get_mode(void);
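
/*
 * For example: booting the host with "kvm-arm.mode=protected" on the
 * kernel command line selects KVM_MODE_PROTECTED; otherwise kvm_get_mode()
 * is expected to return KVM_MODE_DEFAULT (illustrative note only).
 */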

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void);

int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);

struct kvm_vmid {
	/* The VMID generation used for the virt. memory system */
	u64 vmid_gen;
	u32 vmid;
};

struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here. This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t	pgd_phys;
	struct kvm_pgtable *pgt;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	struct kvm_arch *arch;
};

struct kvm_arch_memory_slot {
};

struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/* VTCR_EL2 value for this VM */
	u64 vtcr;

	/* The maximum number of vCPUs depends on the used GIC model */
	int max_vcpus;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Mandated version of PSCI */
	u32 psci_version;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space. User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
	bool return_nisv_io_abort_to_user;

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
	unsigned int pmuver;

	u8 pfr0_csv2;
	u8 pfr0_csv3;
};

struct kvm_vcpu_fault_info {
	u32 esr_el2;		/* Hyp Syndrome Register */
	u64 far_el2;		/* Hyp Fault Address Register */
	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
	u64 disr_el1;		/* Deferred [SError] Status Register */
};

enum vcpu_sysreg {
	__INVALID_SYSREG__,	/* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	ZCR_EL1,	/* SVE Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMSWINC_EL0,	/* Software Increment Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in a strict increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	ELR_EL1,
	SP_EL1,
	SPSR_EL1,

	CNTVOFF_EL2,
	CNTV_CVAL_EL0,
	CNTV_CTL_EL0,
	CNTP_CVAL_EL0,
	CNTP_CTL_EL0,

	/* 32bit specific registers. Keep them at the end of the range */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	NR_SYS_REGS	/* Nothing after this line! */
};

struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	u64 sys_regs[NR_SYS_REGS];

	struct kvm_vcpu *__hyp_running_vcpu;
};

struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
	struct kvm_pmu_events pmu_events;
};

struct kvm_host_psci_config {
	/* PSCI version used by host. */
	u32 version;

	/* Function IDs used by host if version is v0.1. */
	struct psci_0_1_function_ids function_ids_0_1;

	bool psci_0_1_cpu_suspend_implemented;
	bool psci_0_1_cpu_on_implemented;
	bool psci_0_1_cpu_off_implemented;
	bool psci_0_1_migrate_implemented;
};

extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)

extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)

extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;
	void *sve_state;
	unsigned int sve_max_vl;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* HYP configuration */
	u64 hcr_el2;
	u32 mdcr_el2;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* State of various workarounds, see kvm_asm.h for bit assignment */
	u64 workaround_flags;

	/* Miscellaneous vcpu state flags */
	u64 flags;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them. host_debug_state are
	 * the host registers which are saved and restored during
	 * world switches. external_debug_state contains the debug values
	 * we want to use when debugging the guest. This is set via the
	 * KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	struct thread_info *host_thread_info;	/* hyp VA */
	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
		/* Self-hosted trace */
		u64 trfcr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32	mdscr_el1;
	} guest_debug_preserved;

	/* vcpu power-off state */
	bool power_off;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Detect first run of a vcpu */
	bool has_run_once;

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/* True when deferrable sysregs are loaded on the physical CPU,
	 * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe. */
	bool sysregs_loaded_on_cpu;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;
};

/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
			     sve_ffr_offset((vcpu)->arch.sve_max_vl))

#define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = vcpu_sve_max_vq(vcpu);			\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})
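
/*
 * A minimal sketch of how the buffer behind vcpu->arch.sve_state is
 * expected to be sized with the macro above (assuming a plain kzalloc;
 * the real SVE finalization code lives outside this header):
 *
 *	void *buf = kzalloc(vcpu_sve_state_size(vcpu), GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	vcpu->arch.sve_state = buf;
 */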

/* vcpu_arch flags field values: */
#define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_IN_USE	(1 << 3) /* backup for host TIF_SVE */
#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */
#define KVM_ARM64_PENDING_EXCEPTION	(1 << 8) /* Exception pending */
#define KVM_ARM64_EXCEPT_MASK		(7 << 9) /* Target EL/MODE */
#define KVM_ARM64_DEBUG_STATE_SAVE_SPE	(1 << 12) /* Save SPE context if active */
#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE	(1 << 13) /* Save TRBE context if active */

/*
 * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
 * take the following values:
 *
 * For AArch32 EL1:
 */
#define KVM_ARM64_EXCEPT_AA32_UND	(0 << 9)
#define KVM_ARM64_EXCEPT_AA32_IABT	(1 << 9)
#define KVM_ARM64_EXCEPT_AA32_DABT	(2 << 9)
/* For AArch64: */
#define KVM_ARM64_EXCEPT_AA64_ELx_SYNC	(0 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_IRQ	(1 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_FIQ	(2 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_SERR	(3 << 9)
#define KVM_ARM64_EXCEPT_AA64_EL1	(0 << 11)
#define KVM_ARM64_EXCEPT_AA64_EL2	(1 << 11)

/*
 * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
 * set together with an exception...
 */
#define KVM_ARM64_INCREMENT_PC		(1 << 9) /* Increment PC */

#define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
	 (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
#else
#define vcpu_has_ptrauth(vcpu)		false
#endif

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU. For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 */
#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))
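
/*
 * Illustrative example of the distinction drawn above (a sketch, not a
 * prescription): an emulated, never context-switched register such as
 * PMCR_EL0 can be read from its memory-backed copy,
 *
 *	u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
 *
 * whereas vcpu_read_sys_reg()/vcpu_write_sys_reg() below prefer the value
 * live on the CPU when the vcpu's sysregs are loaded (VHE).
 */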

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);

static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	*val = read_sysreg_s(SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
	case PAR_EL1:		*val = read_sysreg_par();		break;
	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}
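
/*
 * A minimal sketch of the intended caller pattern (assumed; see the
 * vcpu_read_sys_reg() implementation in sys_regs.c for the real thing):
 * try the live CPU copy first, then fall back to the memory-backed
 * register,
 *
 *	u64 val;
 *
 *	if (!vcpu->arch.sysregs_loaded_on_cpu ||
 *	    !__vcpu_read_sys_reg_from_cpu(reg, &val))
 *		val = __vcpu_sys_reg(vcpu, reg);
 */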

static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be set
	 * once, before running the VCPU, and never changed later.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}

struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_success_ns;
	u64 halt_poll_fail_ns;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 exits;
};

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end, unsigned flags);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while(0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})
#else /* __KVM_NVHE_HYPERVISOR__ */
#define kvm_call_hyp(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
#endif /* __KVM_NVHE_HYPERVISOR__ */
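
/*
 * Usage sketch (illustrative only; the actual call sites live elsewhere in
 * arch/arm64/kvm): hyp functions are invoked by name and the macros above
 * pick the right mechanism for VHE vs. nVHE, e.g.
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 *	mdcr = kvm_call_hyp_ret(__kvm_get_mdcr_el2);
 */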

void force_vm_exit(const cpumask_t *mask);
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);

void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

void kvm_sys_reg_table_init(void);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

int kvm_perf_init(void);
int kvm_perf_teardown(void);

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = GPA_INVALID;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != GPA_INVALID);
}

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

static inline bool kvm_system_needs_idmapped_vectors(void)
{
	return cpus_have_const_cap(ARM64_SPECTRE_V3A);
}

void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}

/* Flags for host debug state */
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_run_map_fp(vcpu);
}

void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);

void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif

void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);

int kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) \
	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)

#define kvm_vcpu_has_pmu(vcpu)				\
	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))

#define kvm_supports_32bit_el0()				\
	(system_supports_32bit_el0() &&				\
	 !static_branch_unlikely(&arm64_mismatched_32bit_el0))

int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
extern phys_addr_t hyp_mem_size;
void __init kvm_hyp_reserve(void);
#else
static inline void kvm_hyp_reserve(void) { }
#endif

#endif /* __ARM64_KVM_HOST_H__ */