xref: /OK3568_Linux_fs/kernel/arch/arm64/kvm/guest.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/nospec.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/sigcontext.h>

#include "trace.h"

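/* Per-vCPU statistics exposed to userspace via debugfs. */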
struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT("halt_successful_poll", halt_successful_poll),
	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
	VCPU_STAT("halt_wakeup", halt_wakeup),
	VCPU_STAT("hvc_exit_stat", hvc_exit_stat),
	VCPU_STAT("wfe_exit_stat", wfe_exit_stat),
	VCPU_STAT("wfi_exit_stat", wfi_exit_stat),
	VCPU_STAT("mmio_exit_user", mmio_exit_user),
	VCPU_STAT("mmio_exit_kernel", mmio_exit_kernel),
	VCPU_STAT("exits", exits),
	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
	{ NULL }
};

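/*
 * Returns true if @off lands within the FPSIMD V-registers
 * (fp_regs.vregs[0..31]) inside struct kvm_regs.
 */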
static bool core_reg_offset_is_vreg(u64 off)
{
	return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
		off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
}

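/* Strip a register ID down to its offset into struct kvm_regs. */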
static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}

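/*
 * Returns the byte size of the core register at @off, or -EINVAL if the
 * offset is unknown, misaligned, or must be accessed through the SVE
 * interface instead.
 */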
static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
{
	int size;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
	case KVM_REG_ARM_CORE_REG(regs.sp):
	case KVM_REG_ARM_CORE_REG(regs.pc):
	case KVM_REG_ARM_CORE_REG(regs.pstate):
	case KVM_REG_ARM_CORE_REG(sp_el1):
	case KVM_REG_ARM_CORE_REG(elr_el1):
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		size = sizeof(__u64);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		size = sizeof(__uint128_t);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		size = sizeof(__u32);
		break;

	default:
		return -EINVAL;
	}

	if (!IS_ALIGNED(off, size / sizeof(__u32)))
		return -EINVAL;

	/*
	 * The KVM_REG_ARM64_SVE regs must be used instead of
	 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
	 * SVE-enabled vcpus:
	 */
	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
		return -EINVAL;

	return size;
}

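/*
 * Translate a validated core register ID into a pointer to the backing
 * field in the vcpu context, or NULL if the ID or its size is bogus.
 */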
static void *core_reg_addr(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	u64 off = core_reg_offset_from_id(reg->id);
	int size = core_reg_size_from_offset(vcpu, off);

	if (size < 0)
		return NULL;

	if (KVM_REG_SIZE(reg->id) != size)
		return NULL;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
		off -= KVM_REG_ARM_CORE_REG(regs.regs[0]);
		off /= 2;
		return &vcpu->arch.ctxt.regs.regs[off];

	case KVM_REG_ARM_CORE_REG(regs.sp):
		return &vcpu->arch.ctxt.regs.sp;

	case KVM_REG_ARM_CORE_REG(regs.pc):
		return &vcpu->arch.ctxt.regs.pc;

	case KVM_REG_ARM_CORE_REG(regs.pstate):
		return &vcpu->arch.ctxt.regs.pstate;

	case KVM_REG_ARM_CORE_REG(sp_el1):
		return __ctxt_sys_reg(&vcpu->arch.ctxt, SP_EL1);

	case KVM_REG_ARM_CORE_REG(elr_el1):
		return __ctxt_sys_reg(&vcpu->arch.ctxt, ELR_EL1);

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_EL1]):
		return __ctxt_sys_reg(&vcpu->arch.ctxt, SPSR_EL1);

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_ABT]):
		return &vcpu->arch.ctxt.spsr_abt;

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_UND]):
		return &vcpu->arch.ctxt.spsr_und;

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_IRQ]):
		return &vcpu->arch.ctxt.spsr_irq;

	case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_FIQ]):
		return &vcpu->arch.ctxt.spsr_fiq;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		off -= KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]);
		off /= 4;
		return &vcpu->arch.ctxt.fp_regs.vregs[off];

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
		return &vcpu->arch.ctxt.fp_regs.fpsr;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		return &vcpu->arch.ctxt.fp_regs.fpcr;

	default:
		return NULL;
	}
}

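/* KVM_GET_ONE_REG implementation for the core (struct kvm_regs) range. */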
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/*
	 * Because the kvm_regs structure is a mix of 32, 64 and
	 * 128bit fields, we index it as if it was a 32bit
	 * array. Hence below, nr_regs is the number of entries, and
	 * off the index in the "array".
	 */
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
	void *addr;
	u32 off;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	addr = core_reg_addr(vcpu, reg);
	if (!addr)
		return -EINVAL;

	if (copy_to_user(uaddr, addr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

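/*
 * KVM_SET_ONE_REG implementation for the core range: validates any
 * PSTATE write against the vcpu's execution state, then narrows the
 * general-purpose registers after an update that lands in AArch32.
 */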
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
	__uint128_t tmp;
	void *valp = &tmp, *addr;
	u64 off;
	int err = 0;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	addr = core_reg_addr(vcpu, reg);
	if (!addr)
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
		err = -EFAULT;
		goto out;
	}

	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
		switch (mode) {
		case PSR_AA32_MODE_USR:
			if (!kvm_supports_32bit_el0())
				return -EINVAL;
			break;
		case PSR_AA32_MODE_FIQ:
		case PSR_AA32_MODE_IRQ:
		case PSR_AA32_MODE_SVC:
		case PSR_AA32_MODE_ABT:
		case PSR_AA32_MODE_UND:
			if (!vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		case PSR_MODE_EL0t:
		case PSR_MODE_EL1t:
		case PSR_MODE_EL1h:
			if (vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	memcpy(addr, valp, KVM_REG_SIZE(reg->id));

	if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
		int i, nr_reg;

		/* Only the mode field is relevant for picking the register set. */
		switch (*vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK) {
		/*
		 * Either we are dealing with user mode, and only the
		 * first 15 registers (+ PC) must be narrowed to 32bit.
		 * AArch32 r0-r14 conveniently map to AArch64 x0-x14.
		 */
		case PSR_AA32_MODE_USR:
		case PSR_AA32_MODE_SYS:
			nr_reg = 15;
			break;

		/*
		 * Otherwise, this is a privileged mode, and *all* the
		 * registers must be narrowed to 32bit.
		 */
		default:
			nr_reg = 31;
			break;
		}

		for (i = 0; i < nr_reg; i++)
			vcpu_set_reg(vcpu, i, (u32)vcpu_get_reg(vcpu, i));

		*vcpu_pc(vcpu) = (u32)*vcpu_pc(vcpu);
	}
out:
	return err;
}

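/* Helpers to index the KVM_REG_ARM64_SVE_VLS bitmap of vector lengths. */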
#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
#define vq_present(vqs, vq) (!!((vqs)[vq_word(vq)] & vq_mask(vq)))

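/* Report the set of SVE vector lengths visible to the guest. */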
static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
		return -EINVAL;

	memset(vqs, 0, sizeof(vqs));

	max_vq = vcpu_sve_max_vq(vcpu);
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (sve_vq_available(vq))
			vqs[vq_word(vq)] |= vq_mask(vq);

	if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
		return -EFAULT;

	return 0;
}

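/*
 * Configure the guest's SVE vector lengths; only permitted before the
 * vcpu's SVE configuration has been finalized.
 */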
static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM; /* too late! */

	if (WARN_ON(vcpu->arch.sve_state))
		return -EINVAL;

	if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
		return -EFAULT;

	max_vq = 0;
	for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
		if (vq_present(vqs, vq))
			max_vq = vq;

	if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
		return -EINVAL;

	/*
	 * Vector lengths supported by the host can't currently be
	 * hidden from the guest individually: instead we can only set a
	 * maximum via ZCR_EL2.LEN.  So, make sure the available vector
	 * lengths match the set requested exactly up to the requested
	 * maximum:
	 */
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (vq_present(vqs, vq) != sve_vq_available(vq))
			return -EINVAL;

	/* Can't run with no vector lengths at all: */
	if (max_vq < SVE_VQ_MIN)
		return -EINVAL;

	/* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
	vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);

	return 0;
}

#define SVE_REG_SLICE_SHIFT	0
#define SVE_REG_SLICE_BITS	5
#define SVE_REG_ID_SHIFT	(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
#define SVE_REG_ID_BITS		5

#define SVE_REG_SLICE_MASK					\
	GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1,	\
		SVE_REG_SLICE_SHIFT)
#define SVE_REG_ID_MASK							\
	GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)

#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)

#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))

/*
 * Number of register slices required to cover each whole SVE register.
 * NOTE: Only the first slice ever exists, for now.
 * If you are tempted to modify this, you must also rework sve_reg_to_region()
 * to match:
 */
#define vcpu_sve_slices(vcpu) 1

/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
struct sve_state_reg_region {
	unsigned int koffset;	/* offset into sve_state in kernel memory */
	unsigned int klen;	/* length in kernel memory */
	unsigned int upad;	/* extra trailing padding in user memory */
};

/*
 * Validate SVE register ID and get sanitised bounds for user/kernel SVE
 * register copy
 */
static int sve_reg_to_region(struct sve_state_reg_region *region,
			     struct kvm_vcpu *vcpu,
			     const struct kvm_one_reg *reg)
{
	/* reg ID ranges for Z- registers */
	const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
	const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
						       SVE_NUM_SLICES - 1);

	/* reg ID ranges for P- registers and FFR (which are contiguous) */
	const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
	const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);

	unsigned int vq;
	unsigned int reg_num;

	unsigned int reqoffset, reqlen; /* User-requested offset and length */
	unsigned int maxlen; /* Maximum permitted length */

	size_t sve_state_size;

	const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
							SVE_NUM_SLICES - 1);

	/* Verify that the P-regs and FFR really do have contiguous IDs: */
	BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);

	/* Verify that we match the UAPI header: */
	BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);

	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;

	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = vcpu_sve_max_vq(vcpu);

		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_ZREG_SIZE;
		maxlen = SVE_SIG_ZREG_SIZE(vq);
	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = vcpu_sve_max_vq(vcpu);

		reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_PREG_SIZE;
		maxlen = SVE_SIG_PREG_SIZE(vq);
	} else {
		return -EINVAL;
	}

	sve_state_size = vcpu_sve_state_size(vcpu);
	if (WARN_ON(!sve_state_size))
		return -EINVAL;

	region->koffset = array_index_nospec(reqoffset, sve_state_size);
	region->klen = min(maxlen, reqlen);
	region->upad = reqlen - region->klen;

	return 0;
}

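/* KVM_GET_ONE_REG implementation for the SVE coprocessor range. */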
static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	char __user *uptr = (char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return get_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
			 region.klen) ||
	    clear_user(uptr + region.klen, region.upad))
		return -EFAULT;

	return 0;
}

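/* KVM_SET_ONE_REG implementation for the SVE coprocessor range. */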
static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	const char __user *uptr = (const char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return set_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
			   region.klen))
		return -EFAULT;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

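/*
 * Walk every 32-bit slot of struct kvm_regs, emitting a register ID for
 * each valid core register; with a NULL @uindices this only counts them.
 */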
static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	unsigned int i;
	int n = 0;

	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
		u64 reg = KVM_REG_ARM64 | KVM_REG_ARM_CORE | i;
		int size = core_reg_size_from_offset(vcpu, i);

		if (size < 0)
			continue;

		switch (size) {
		case sizeof(__u32):
			reg |= KVM_REG_SIZE_U32;
			break;

		case sizeof(__u64):
			reg |= KVM_REG_SIZE_U64;
			break;

		case sizeof(__uint128_t):
			reg |= KVM_REG_SIZE_U128;
			break;

		default:
			WARN_ON(1);
			continue;
		}

		if (uindices) {
			if (put_user(reg, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
{
	return copy_core_reg_indices(vcpu, NULL);
}

/*
 * ARM64 versions of the TIMER registers, always available on arm64
 */

#define NUM_TIMER_REGS 3

static bool is_timer_reg(u64 index)
{
	switch (index) {
	case KVM_REG_ARM_TIMER_CTL:
	case KVM_REG_ARM_TIMER_CNT:
	case KVM_REG_ARM_TIMER_CVAL:
		return true;
	}
	return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
		return -EFAULT;

	return 0;
}

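/* Userspace accessors for the architected timer registers. */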
static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int ret;

	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
	if (ret != 0)
		return -EFAULT;

	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	val = kvm_arm_timer_get_reg(vcpu, reg->id);
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}

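/* Number of SVE register IDs reported, including the VLS pseudo-register. */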
static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
		+ 1; /* KVM_REG_ARM64_SVE_VLS */
}

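/*
 * Emit the register IDs for the VLS pseudo-register and each Z-, P- and
 * FFR register slice, in the order userspace should save/restore them.
 */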
static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);
	u64 reg;
	unsigned int i, n;
	int num_regs = 0;

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	/*
	 * Enumerate this first, so that userspace can save/restore in
	 * the order reported by KVM_GET_REG_LIST:
	 */
	reg = KVM_REG_ARM64_SVE_VLS;
	if (put_user(reg, uindices++))
		return -EFAULT;
	++num_regs;

	for (i = 0; i < slices; i++) {
		for (n = 0; n < SVE_NUM_ZREGS; n++) {
			reg = KVM_REG_ARM64_SVE_ZREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		for (n = 0; n < SVE_NUM_PREGS; n++) {
			reg = KVM_REG_ARM64_SVE_PREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		reg = KVM_REG_ARM64_SVE_FFR(i);
		if (put_user(reg, uindices++))
			return -EFAULT;
		num_regs++;
	}

	return num_regs;
}

/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_core_regs(vcpu);
	res += num_sve_regs(vcpu);
	res += kvm_arm_num_sys_reg_descs(vcpu);
	res += kvm_arm_get_fw_num_regs(vcpu);
	res += NUM_TIMER_REGS;

	return res;
}

/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 *
 * We do core registers right here, then we append system regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int ret;

	ret = copy_core_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sve_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += kvm_arm_get_fw_num_regs(vcpu);

	ret = copy_timer_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}

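/* Top-level KVM_GET_ONE_REG dispatcher for arm64. */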
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return get_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_get_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return get_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return get_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_get_reg(vcpu, reg);
}

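/* Top-level KVM_SET_ONE_REG dispatcher for arm64. */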
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return set_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_set_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return set_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return set_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_set_reg(vcpu, reg);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

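/* Snapshot the pending SError state for KVM_GET_VCPU_EVENTS. */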
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);

	if (events->exception.serror_pending && events->exception.serror_has_esr)
		events->exception.serror_esr = vcpu_get_vsesr(vcpu);

	/*
	 * We never return a pending ext_dabt here because we deliver it to
	 * the virtual CPU directly when setting the event and it's no longer
	 * 'pending' at this point.
	 */

	return 0;
}

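/*
 * Apply KVM_SET_VCPU_EVENTS: inject a pending SError (with an optional
 * ESR on RAS-capable hosts) and/or a synchronous external data abort.
 */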
int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	bool serror_pending = events->exception.serror_pending;
	bool has_esr = events->exception.serror_has_esr;
	bool ext_dabt_pending = events->exception.ext_dabt_pending;

	if (serror_pending && has_esr) {
		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
			return -EINVAL;

		if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
			kvm_set_sei_esr(vcpu, events->exception.serror_esr);
		else
			return -EINVAL;
	} else if (serror_pending) {
		kvm_inject_vabt(vcpu);
	}

	if (ext_dabt_pending)
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));

	return 0;
}

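/* Map the host CPU's implementer/part number to the closest KVM target. */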
int __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_ARM:
		switch (part_number) {
		case ARM_CPU_PART_AEM_V8:
			return KVM_ARM_TARGET_AEM_V8;
		case ARM_CPU_PART_FOUNDATION:
			return KVM_ARM_TARGET_FOUNDATION_V8;
		case ARM_CPU_PART_CORTEX_A53:
			return KVM_ARM_TARGET_CORTEX_A53;
		case ARM_CPU_PART_CORTEX_A57:
			return KVM_ARM_TARGET_CORTEX_A57;
		}
		break;
	case ARM_CPU_IMP_APM:
		switch (part_number) {
		case APM_CPU_PART_POTENZA:
			return KVM_ARM_TARGET_XGENE_POTENZA;
		}
		break;
	}

	/* Return a default generic target */
	return KVM_ARM_TARGET_GENERIC_V8;
}

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
	int target = kvm_target_cpu();

	if (target < 0)
		return -ENODEV;

	memset(init, 0, sizeof(*init));

	/*
	 * For now, we don't return any features.
	 * In future, we might use features to return target
	 * specific features available for the preferred
	 * target type.
	 */
	init->target = (__u32)target;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |    \
			    KVM_GUESTDBG_USE_SW_BP | \
			    KVM_GUESTDBG_USE_HW | \
			    KVM_GUESTDBG_SINGLESTEP)

/**
 * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
 * @vcpu:	the vCPU pointer
 * @dbg:	the ioctl data buffer
 *
 * This sets up and enables the VM for guest debugging. Userspace
 * passes in a control flag to enable different debug types and
 * potentially other architecture specific information in the rest of
 * the structure.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int ret = 0;

	trace_kvm_set_guest_debug(vcpu, dbg->control);

	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
		ret = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;

		/* Hardware assisted Break and Watch points */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW)
			vcpu->arch.external_debug_state = dbg->arch;
	} else {
		/* If not enabled clear all flags */
		vcpu->guest_debug = 0;
	}

out:
	return ret;
}

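/* vCPU device attribute plumbing for the PMU, timer and PV time controls. */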
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_set_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_get_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_PVTIME_CTRL:
		ret = kvm_arm_pvtime_has_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}