// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/reset.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/hw_breakpoint.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include <kvm/arm_arch_timer.h>

#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/virt.h>

/* Maximum phys_shift supported for any VM on this host */
static u32 kvm_ipa_limit;

/*
 * ARMv8 Reset Values
 */
#define VCPU_RESET_PSTATE_EL1	(PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT | \
				 PSR_F_BIT | PSR_D_BIT)

#define VCPU_RESET_PSTATE_SVC	(PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \
				 PSR_AA32_I_BIT | PSR_AA32_F_BIT)

unsigned int kvm_sve_max_vl;

int kvm_arm_init_sve(void)
{
	if (system_supports_sve()) {
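		/* Start from the largest VL that every CPU can virtualise. */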
		kvm_sve_max_vl = sve_max_virtualisable_vl;

		/*
		 * The get_sve_reg()/set_sve_reg() ioctl interface will need
		 * to be extended with multiple register slice support in
		 * order to support vector lengths greater than
		 * SVE_VL_ARCH_MAX:
		 */
		if (WARN_ON(kvm_sve_max_vl > SVE_VL_ARCH_MAX))
			kvm_sve_max_vl = SVE_VL_ARCH_MAX;

		/*
		 * Don't even try to make use of vector lengths that
		 * aren't available on all CPUs, for now:
		 */
		if (kvm_sve_max_vl < sve_max_vl)
			pr_warn("KVM: SVE vector length for guests limited to %u bytes\n",
				kvm_sve_max_vl);
	}

	return 0;
}

static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
{
	if (!system_supports_sve())
		return -EINVAL;

	vcpu->arch.sve_max_vl = kvm_sve_max_vl;

	/*
	 * Userspace can still customize the vector lengths by writing
	 * KVM_REG_ARM64_SVE_VLS. Allocation is deferred until
	 * kvm_arm_vcpu_finalize(), which freezes the configuration.
	 */
	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE;

	return 0;
}

/*
 * Finalize vcpu's maximum SVE vector length, allocating
 * vcpu->arch.sve_state as necessary.
 */
static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
{
	void *buf;
	unsigned int vl;

	vl = vcpu->arch.sve_max_vl;

	/*
	 * Responsibility for these properties is shared between
	 * kvm_arm_init_sve(), kvm_vcpu_enable_sve() and
	 * set_sve_vls(). Double-check here just to be sure:
	 */
	if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl ||
		    vl > SVE_VL_ARCH_MAX))
		return -EIO;

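	/*
	 * Size the buffer for the maximum vector length: SVE_SIG_REGS_SIZE()
	 * covers the Z, P and FFR registers for the given number of quadwords.
	 */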
	buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vcpu->arch.sve_state = buf;
	vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
	return 0;
}

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
{
	switch (feature) {
	case KVM_ARM_VCPU_SVE:
		if (!vcpu_has_sve(vcpu))
			return -EINVAL;

		if (kvm_arm_vcpu_sve_finalized(vcpu))
			return -EPERM;

		return kvm_vcpu_finalize_sve(vcpu);
	}

	return -EINVAL;
}

bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
		return false;

	return true;
}

void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kfree(vcpu->arch.sve_state);
}

static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu))
		memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
}

static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
{
	/*
	 * For now make sure that both address/generic pointer authentication
	 * features are requested by userspace together and that the system
	 * supports these capabilities.
	 */
	if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features) ||
	    !system_has_full_ptr_auth())
		return -EINVAL;

	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH;
	return 0;
}

static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *tmp;
	bool is32bit;
	int i;

	is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
	if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
		return false;

	/* Check that the vcpus are either all 32bit or all 64bit */
	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
		if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
			return false;
	}

	return true;
}

/**
 * kvm_reset_vcpu - sets core registers and sys_regs to reset value
 * @vcpu: The VCPU pointer
 *
 * This function sets the registers on the virtual CPU struct to their
 * architecturally defined reset values, except for registers whose reset is
 * deferred until kvm_arm_vcpu_finalize().
 *
 * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
 * ioctl or as part of handling a request issued by another VCPU in the PSCI
 * handling code. In the first case, the VCPU will not be loaded, and in the
 * second case the VCPU will be loaded. Because this function operates purely
 * on the memory-backed values of system registers, we want to do a full put if
 * we were loaded (handling a request) and load the values back at the end of
 * the function. Otherwise we leave the state alone. In both cases, we
 * disable preemption around the vcpu reset as we would otherwise race with
 * preempt notifiers which also call put/load.
 */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_reset_state reset_state;
	int ret;
	bool loaded;
	u32 pstate;

	mutex_lock(&vcpu->kvm->lock);
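	/*
	 * Snapshot the reset request and clear it while holding kvm->lock so
	 * that a concurrent PSCI CPU_ON cannot update the state under us.
	 */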
	reset_state = vcpu->arch.reset_state;
	WRITE_ONCE(vcpu->arch.reset_state.reset, false);
	mutex_unlock(&vcpu->kvm->lock);

	/* Reset PMU outside of the non-preemptible section */
	kvm_pmu_vcpu_reset(vcpu);

	preempt_disable();
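	/* vcpu->cpu is -1 unless the vCPU is currently loaded on a physical CPU. */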
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
		if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) {
			ret = kvm_vcpu_enable_sve(vcpu);
			if (ret)
				goto out;
		}
	} else {
		kvm_vcpu_reset_sve(vcpu);
	}

	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
		if (kvm_vcpu_enable_ptrauth(vcpu)) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (!vcpu_allowed_register_width(vcpu)) {
		ret = -EINVAL;
		goto out;
	}

	switch (vcpu->arch.target) {
	default:
		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
			pstate = VCPU_RESET_PSTATE_SVC;
		} else {
			pstate = VCPU_RESET_PSTATE_EL1;
		}

		if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) {
			ret = -EINVAL;
			goto out;
		}
		break;
	}

	/* Reset core registers */
	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
	memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
	vcpu->arch.ctxt.spsr_abt = 0;
	vcpu->arch.ctxt.spsr_und = 0;
	vcpu->arch.ctxt.spsr_irq = 0;
	vcpu->arch.ctxt.spsr_fiq = 0;
	vcpu_gp_regs(vcpu)->pstate = pstate;

	/* Reset system registers */
	kvm_reset_sys_regs(vcpu);

	/*
	 * Additional reset state handling that PSCI may have imposed on us.
	 * Must be done after all the sys_reg reset.
	 */
	if (reset_state.reset) {
		unsigned long target_pc = reset_state.pc;

		/* Gracefully handle Thumb2 entry point */
		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
			target_pc &= ~1UL;
			vcpu_set_thumb(vcpu);
		}

		/* Propagate caller endianness */
		if (reset_state.be)
			kvm_vcpu_set_be(vcpu);

		*vcpu_pc(vcpu) = target_pc;
		vcpu_set_reg(vcpu, 0, reset_state.r0);
	}

	/* Reset timer */
	ret = kvm_timer_vcpu_reset(vcpu);
out:
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();
	return ret;
}

u32 get_kvm_ipa_limit(void)
{
	return kvm_ipa_limit;
}

int kvm_set_ipa_limit(void)
{
	unsigned int parange, tgran_2;
	u64 mmfr0;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_PARANGE_SHIFT);
	/*
	 * IPA size beyond 48 bits cannot be supported
	 * on either 4K or 16K page size. Hence let's cap
	 * it to 48 bits, in case it's reported as larger
	 * on the system.
	 */
	if (PAGE_SIZE != SZ_64K)
		parange = min(parange, (unsigned int)ID_AA64MMFR0_PARANGE_48);

	/*
	 * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
	 * Stage-2. If not, things will stop very quickly.
	 */
	switch (PAGE_SIZE) {
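	/* PAGE_SIZE can only be 4K, 16K or 64K; default shares the 4K path. */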
	default:
	case SZ_4K:
		tgran_2 = ID_AA64MMFR0_TGRAN4_2_SHIFT;
		break;
	case SZ_16K:
		tgran_2 = ID_AA64MMFR0_TGRAN16_2_SHIFT;
		break;
	case SZ_64K:
		tgran_2 = ID_AA64MMFR0_TGRAN64_2_SHIFT;
		break;
	}

	switch (cpuid_feature_extract_unsigned_field(mmfr0, tgran_2)) {
	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
		kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
		return -EINVAL;
	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT:
		kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
		break;
	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX:
		kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
		break;
	default:
		kvm_err("Unsupported value for TGRAN_2, giving up\n");
		return -EINVAL;
	}

	kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
	kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit,
		 ((kvm_ipa_limit < KVM_PHYS_SHIFT) ?
		  " (Reduced IPA size, limited VM/VMM compatibility)" : ""));

	return 0;
}

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
{
	u64 mmfr0, mmfr1;
	u32 phys_shift;

	if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
		return -EINVAL;

	phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
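	/*
	 * A zero IPA size selects the legacy default (KVM_PHYS_SHIFT);
	 * otherwise validate the size requested by userspace.
	 */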
	if (phys_shift) {
		if (phys_shift > kvm_ipa_limit ||
		    phys_shift < 32)
			return -EINVAL;
	} else {
		phys_shift = KVM_PHYS_SHIFT;
		if (phys_shift > kvm_ipa_limit) {
			pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
				     current->comm);
			return -EINVAL;
		}
	}

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
	kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);

	return 0;
}