// SPDX-License-Identifier: GPL-2.0
/*
 * arch/arm64/kvm/fpsimd.c: Guest/host FPSIMD context coordination helpers
 *
 * Copyright 2018 Arm Limited
 * Author: Dave Martin <Dave.Martin@arm.com>
 */
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/kvm_host.h>
#include <asm/fpsimd.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>

/*
 * Called on entry to KVM_RUN unless this vcpu previously ran at least
 * once and the most recent prior KVM_RUN for this vcpu was called from
 * the same task as current (highly likely).
 *
 * This is guaranteed to execute before kvm_arch_vcpu_load_fp(vcpu),
 * such that on entering hyp the relevant parts of current are already
 * mapped.
 */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
{
	int ret;

	struct thread_info *ti = &current->thread_info;
	struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;

	/*
	 * Make sure the host task thread flags and fpsimd state are
	 * visible to hyp:
	 */
	ret = create_hyp_mappings(ti, ti + 1, PAGE_HYP);
	if (ret)
		goto error;

	ret = create_hyp_mappings(fpsimd, fpsimd + 1, PAGE_HYP);
	if (ret)
		goto error;

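	/*
	 * The vcpu's SVE backing storage, if any, is also accessed from
	 * hyp, so it needs a hyp mapping of its own:
	 */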
	if (vcpu->arch.sve_state) {
		void *sve_end;

		sve_end = vcpu->arch.sve_state + vcpu_sve_state_size(vcpu);

		ret = create_hyp_mappings(vcpu->arch.sve_state, sve_end,
					  PAGE_HYP);
		if (ret)
			goto error;
	}

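	/*
	 * Record the hyp VA aliases of the host structures so that the
	 * hyp code can locate them directly:
	 */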
	vcpu->arch.host_thread_info = kern_hyp_va(ti);
	vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
error:
	return ret;
}

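/*
 * Rough per-KVM_RUN call sequence for the helpers in this file (a
 * sketch based on the comments here, not literal caller code):
 *
 *	kvm_arch_vcpu_run_map_fp(vcpu);   // on entry to KVM_RUN, if needed
 *	kvm_arch_vcpu_load_fp(vcpu);      // before running the guest
 *	...                               // guest runs; hyp handles FPSIMD traps
 *	kvm_arch_vcpu_ctxsync_fp(vcpu);   // after returning from the guest
 *	kvm_arch_vcpu_put_fp(vcpu);       // when the vcpu is put
 */
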
/*
 * Prepare vcpu for saving the host's FPSIMD state and loading the guest's.
 * The actual loading is done by the FPSIMD access trap taken to hyp.
 *
 * Here, we just set the correct metadata to indicate that the FPSIMD
 * state in the cpu regs (if any) belongs to current on the host.
 *
 * TIF_SVE is backed up here, since it may get clobbered with guest state.
 * This flag is restored by kvm_arch_vcpu_put_fp(vcpu).
 */
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
{
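	/* KVM_RUN is reached via a vcpu ioctl from a user task, so an mm must exist */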
	BUG_ON(!current->mm);

	vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
			      KVM_ARM64_HOST_SVE_IN_USE |
			      KVM_ARM64_HOST_SVE_ENABLED);
	vcpu->arch.flags |= KVM_ARM64_FP_HOST;

	if (test_thread_flag(TIF_SVE))
		vcpu->arch.flags |= KVM_ARM64_HOST_SVE_IN_USE;

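	/*
	 * Record whether EL0 SVE access was enabled in CPACR_EL1, so that
	 * kvm_arch_vcpu_put_fp() can restore the trap state after the hyp
	 * code resets it:
	 */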
	if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
		vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
}

/*
 * If the guest FPSIMD state was loaded, update the host's context
 * tracking data to mark the CPU FPSIMD regs as dirty and belonging to
 * vcpu, so that they will be written back if the kernel clobbers them
 * due to kernel-mode NEON before re-entry into the guest.
 */
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
		fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.fp_regs,
					 vcpu->arch.sve_state,
					 vcpu->arch.sve_max_vl);

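		/*
		 * The vcpu's state is now live in the CPU regs: keep the
		 * host from reloading current's state over it, and track
		 * whether the live registers hold SVE state:
		 */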
		clear_thread_flag(TIF_FOREIGN_FPSTATE);
		update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu));
	}
}

/*
 * Write back the vcpu FPSIMD regs if they are dirty, and invalidate the
 * cpu FPSIMD regs so that they can't be spuriously reused if this vcpu
 * disappears and another task or vcpu appears that recycles the same
 * struct fpsimd_state.
 */
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	bool host_has_sve = system_supports_sve();
	bool guest_has_sve = vcpu_has_sve(vcpu);

	local_irq_save(flags);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
		if (guest_has_sve) {
			__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);

			/* Restore the VL that was saved when bound to the CPU */
			if (!has_vhe())
				sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
						       SYS_ZCR_EL1);
		}

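		/*
		 * Write the guest's FPSIMD/SVE state back to memory and
		 * invalidate the CPU binding, as described above:
		 */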
		fpsimd_save_and_flush_cpu_state();
	} else if (has_vhe() && host_has_sve) {
		/*
		 * The FPSIMD/SVE state in the CPU has not been touched, and we
		 * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
		 * reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE
		 * for EL0. To avoid spurious traps, restore the trap state
		 * seen by kvm_arch_vcpu_load_fp():
		 */
		if (vcpu->arch.flags & KVM_ARM64_HOST_SVE_ENABLED)
			sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
		else
			sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
	}

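	/* Restore the host's TIF_SVE, backed up in kvm_arch_vcpu_load_fp() */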
	update_thread_flag(TIF_SVE,
			   vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);

	local_irq_restore(flags);
}