// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 - Google Inc
 * Author: Andrew Scull <ascull@google.com>
 */

#include <hyp/switch.h>

#include <asm/pgtable-types.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/trap_handler.h>

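/* Per-CPU initialisation parameters; see struct kvm_nvhe_init_params. */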
DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);

void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);

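/*
 * Host HVC handlers: arguments are read from the host context registers via
 * DECLARE_REG() and any return value is written back to register x1.
 */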
static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);

	cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
}

static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
{
	__kvm_flush_vm_context();
}

static void handle___kvm_tlb_flush_vmid_ipa(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
	DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2);
	DECLARE_REG(int, level, host_ctxt, 3);

	__kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level);
}

static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);

	__kvm_tlb_flush_vmid(kern_hyp_va(mmu));
}

static void handle___kvm_flush_cpu_context(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);

	__kvm_flush_cpu_context(kern_hyp_va(mmu));
}

static void handle___kvm_timer_set_cntvoff(struct kvm_cpu_context *host_ctxt)
{
	__kvm_timer_set_cntvoff(cpu_reg(host_ctxt, 1));
}

static void handle___kvm_enable_ssbs(struct kvm_cpu_context *host_ctxt)
{
	u64 tmp;

	tmp = read_sysreg_el2(SYS_SCTLR);
	tmp |= SCTLR_ELx_DSSBS;
	write_sysreg_el2(tmp, SYS_SCTLR);
}

static void handle___vgic_v3_get_gic_config(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __vgic_v3_get_gic_config();
}

static void handle___vgic_v3_read_vmcr(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __vgic_v3_read_vmcr();
}

static void handle___vgic_v3_write_vmcr(struct kvm_cpu_context *host_ctxt)
{
	__vgic_v3_write_vmcr(cpu_reg(host_ctxt, 1));
}

static void handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt)
{
	__vgic_v3_init_lrs();
}

static void handle___kvm_get_mdcr_el2(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __kvm_get_mdcr_el2();
}

static void handle___vgic_v3_save_aprs(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);

	__vgic_v3_save_aprs(kern_hyp_va(cpu_if));
}

static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);

	__vgic_v3_restore_aprs(kern_hyp_va(cpu_if));
}

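/* pKVM (protected KVM) hypercalls used during hypervisor initialisation. */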
static void handle___pkvm_init(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
	DECLARE_REG(unsigned long, size, host_ctxt, 2);
	DECLARE_REG(unsigned long, nr_cpus, host_ctxt, 3);
	DECLARE_REG(unsigned long *, per_cpu_base, host_ctxt, 4);
	DECLARE_REG(u32, hyp_va_bits, host_ctxt, 5);

	/*
	 * __pkvm_init() will return only if an error occurred, otherwise it
	 * will tail-call in __pkvm_init_finalise() which will have to deal
	 * with the host context directly.
	 */
	cpu_reg(host_ctxt, 1) = __pkvm_init(phys, size, nr_cpus, per_cpu_base,
					    hyp_va_bits);
}

static void handle___pkvm_cpu_set_vector(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(enum arm64_hyp_spectre_vector, slot, host_ctxt, 1);

	cpu_reg(host_ctxt, 1) = pkvm_cpu_set_vector(slot);
}

static void handle___pkvm_create_mappings(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(unsigned long, start, host_ctxt, 1);
	DECLARE_REG(unsigned long, size, host_ctxt, 2);
	DECLARE_REG(unsigned long, phys, host_ctxt, 3);
	DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 4);

	cpu_reg(host_ctxt, 1) = __pkvm_create_mappings(start, size, phys, prot);
}

static void handle___pkvm_create_private_mapping(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
	DECLARE_REG(size_t, size, host_ctxt, 2);
	DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3);

	cpu_reg(host_ctxt, 1) = __pkvm_create_private_mapping(phys, size, prot);
}

static void handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt)
{
	cpu_reg(host_ctxt, 1) = __pkvm_prot_finalize();
}

static void handle___pkvm_mark_hyp(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(phys_addr_t, start, host_ctxt, 1);
	DECLARE_REG(phys_addr_t, end, host_ctxt, 2);

	cpu_reg(host_ctxt, 1) = __pkvm_mark_hyp(start, end);
}

typedef void (*hcall_t)(struct kvm_cpu_context *);

#define HANDLE_FUNC(x)	[__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x

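/*
 * Dispatch table of host hypercall handlers, indexed by SMCCC function ID.
 * IDs without a handler are rejected with SMCCC_RET_NOT_SUPPORTED.
 */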
static const hcall_t host_hcall[] = {
	HANDLE_FUNC(__kvm_vcpu_run),
	HANDLE_FUNC(__kvm_flush_vm_context),
	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
	HANDLE_FUNC(__kvm_tlb_flush_vmid),
	HANDLE_FUNC(__kvm_flush_cpu_context),
	HANDLE_FUNC(__kvm_timer_set_cntvoff),
	HANDLE_FUNC(__kvm_enable_ssbs),
	HANDLE_FUNC(__vgic_v3_get_gic_config),
	HANDLE_FUNC(__vgic_v3_read_vmcr),
	HANDLE_FUNC(__vgic_v3_write_vmcr),
	HANDLE_FUNC(__vgic_v3_init_lrs),
	HANDLE_FUNC(__kvm_get_mdcr_el2),
	HANDLE_FUNC(__vgic_v3_save_aprs),
	HANDLE_FUNC(__vgic_v3_restore_aprs),
	HANDLE_FUNC(__pkvm_init),
	HANDLE_FUNC(__pkvm_cpu_set_vector),
	HANDLE_FUNC(__pkvm_create_mappings),
	HANDLE_FUNC(__pkvm_create_private_mapping),
	HANDLE_FUNC(__pkvm_prot_finalize),
	HANDLE_FUNC(__pkvm_mark_hyp),
};

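/*
 * Translate the SMCCC function ID into an index into host_hcall[] and invoke
 * the handler. x0 is preset to SMCCC_RET_SUCCESS; handlers place any further
 * return value in x1.
 */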
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(unsigned long, id, host_ctxt, 0);
	hcall_t hfn;

	id -= KVM_HOST_SMCCC_ID(0);

	if (unlikely(id >= ARRAY_SIZE(host_hcall)))
		goto inval;

	hfn = host_hcall[id];
	if (unlikely(!hfn))
		goto inval;

	cpu_reg(host_ctxt, 0) = SMCCC_RET_SUCCESS;
	hfn(host_ctxt);

	return;
inval:
	cpu_reg(host_ctxt, 0) = SMCCC_RET_NOT_SUPPORTED;
}

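/* SMCs that the hypervisor doesn't handle itself are forwarded to EL3. */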
static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
{
	__kvm_hyp_host_forward_smc(host_ctxt);
}

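/*
 * Host SMCs are first offered to the hyp PSCI handler; anything it doesn't
 * claim is forwarded to EL3 unchanged.
 */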
static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
{
	bool handled;

	handled = kvm_host_psci_handler(host_ctxt);
	if (!handled)
		default_host_smc_handler(host_ctxt);

	/* SMC was trapped, move ELR past the current PC. */
	kvm_skip_host_instr();
}

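/*
 * Entry point for traps taken from the host to EL2, dispatched on the
 * exception class in ESR_EL2.
 */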
void handle_trap(struct kvm_cpu_context *host_ctxt)
{
	u64 esr = read_sysreg_el2(SYS_ESR);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_HVC64:
		handle_host_hcall(host_ctxt);
		break;
	case ESR_ELx_EC_SMC64:
		handle_host_smc(host_ctxt);
		break;
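	/*
	 * The host tried to use SVE: stop trapping it (clear CPTR_EL2.TZ)
	 * and allow the maximum available vector length.
	 */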
	case ESR_ELx_EC_SVE:
		sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
		isb();
		sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
		break;
	case ESR_ELx_EC_IABT_LOW:
	case ESR_ELx_EC_DABT_LOW:
		handle_host_mem_abort(host_ctxt);
		break;
	default:
		hyp_panic();
	}
}