// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

#include <nvhe/mem_protect.h>

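/*
 * Context saved across a switch onto a guest's stage-2: only the host's
 * TCR_EL1 needs stashing, and only when the ARM64_WORKAROUND_SPECULATIVE_AT
 * workaround is in effect (see __tlb_switch_to_guest()).
 */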
struct tlb_inv_context {
        u64 tcr;
};

static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
                                  struct tlb_inv_context *cxt)
{
        if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                u64 val;

                /*
                 * For CPUs affected by ARM erratum 1319367, we need to
                 * avoid a host Stage-1 walk while we have the guest's
                 * VMID set in the VTTBR in order to invalidate TLBs.
                 * We're guaranteed that the S1 MMU is enabled, so we can
                 * simply set the EPD bits to avoid any further TLB fill.
                 */
                val = cxt->tcr = read_sysreg_el1(SYS_TCR);
                val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
                write_sysreg_el1(val, SYS_TCR);
                isb();
        }

        /*
         * __load_guest_stage2() includes an ISB only when the AT
         * workaround is applied. Take care of the opposite condition,
         * ensuring that we always have an ISB, but not two ISBs back
         * to back.
         */
        __load_guest_stage2(mmu);
        asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}

static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
        __load_host_stage2();

        if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                /* Ensure write of the host VMID */
                isb();
                /* Restore the host's TCR_EL1 */
                write_sysreg_el1(cxt->tcr, SYS_TCR);
        }
}

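/*
 * Invalidate the stage-2 TLB entries covering @ipa for the VMID described
 * by @mmu, with @level used as the level hint for the invalidation, then
 * invalidate all stage-1 entries tagged with that VMID (see below for why).
 */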
void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
                              phys_addr_t ipa, int level)
{
        struct tlb_inv_context cxt;

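        /* Make any prior page-table updates visible before invalidating. */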
        dsb(ishst);

        /* Switch to requested VMID */
        __tlb_switch_to_guest(mmu, &cxt);

        /*
         * We could do so much better if we had the VA as well.
         * Instead, we invalidate Stage-2 for this IPA, and the
         * whole of Stage-1. Weep...
         */
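        /* The TLBI address payload expects the IPA shifted down by 12 bits. */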
        ipa >>= 12;
        __tlbi_level(ipas2e1is, ipa, level);

        /*
         * We have to ensure completion of the invalidation at Stage-2,
         * since a table walk on another CPU could refill a TLB with a
         * complete (S1 + S2) walk based on the old Stage-2 mapping if
         * the Stage-1 invalidation happened first.
         */
        dsb(ish);
        __tlbi(vmalle1is);
        dsb(ish);
        isb();

        /*
         * If the host is running at EL1 and we have a VPIPT I-cache,
         * then we must perform I-cache maintenance at EL2 in order for
         * it to have an effect on the guest. Since the guest cannot hit
         * I-cache lines allocated with a different VMID, we don't need
         * to worry about junk out of guest reset (we nuke the I-cache on
         * VMID rollover), but we do need to be careful when remapping
         * executable pages for the same guest. This can happen when KSM
         * takes a CoW fault on an executable page, copies the page into
         * a page that was previously mapped in the guest and then needs
         * to invalidate the guest view of the I-cache for that page
         * from EL1. To solve this, we invalidate the entire I-cache when
         * unmapping a page from a guest if we have a VPIPT I-cache but
         * the host is running at EL1. As above, we could do better if
         * we had the VA.
         *
         * The moral of this story is: if you have a VPIPT I-cache, then
         * you should be running with VHE enabled.
         */
        if (icache_is_vpipt())
                __flush_icache_all();

        __tlb_switch_to_host(&cxt);
}

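/*
 * Invalidate all TLB entries (stage-1 and stage-2) for the VMID described
 * by @mmu, across the Inner Shareable domain.
 */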
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
        struct tlb_inv_context cxt;

        dsb(ishst);

        /* Switch to requested VMID */
        __tlb_switch_to_guest(mmu, &cxt);

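        /*
         * VMALLS12E1IS: all stage-1 and stage-2 entries for the current
         * VMID, Inner Shareable.
         */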
        __tlbi(vmalls12e1is);
        dsb(ish);
        isb();

        __tlb_switch_to_host(&cxt);
}

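/*
 * Flush this CPU's view of @mmu's context: stage-1 TLB entries for the
 * VMID plus the whole I-cache, with non-shareable (local) scope only.
 */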
void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
        struct tlb_inv_context cxt;

        /* Switch to requested VMID */
        __tlb_switch_to_guest(mmu, &cxt);

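        /* All the maintenance below is local to this CPU (note the nsh scope). */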
        __tlbi(vmalle1);
        asm volatile("ic iallu");
        dsb(nsh);
        isb();

        __tlb_switch_to_host(&cxt);
}

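/*
 * Invalidate TLB entries for all VMIDs across the Inner Shareable domain,
 * typically when the VMID space is recycled on rollover (see the note on
 * VPIPT I-caches below).
 */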
void __kvm_flush_vm_context(void)
{
        dsb(ishst);
        __tlbi(alle1is);

        /*
         * VIPT and PIPT caches are not affected by VMID, so no maintenance
         * is necessary across a VMID rollover.
         *
         * VPIPT caches constrain lookup and maintenance to the active VMID,
         * so we need to invalidate lines with a stale VMID to avoid an ABA
         * race after multiple rollovers.
         */
        if (icache_is_vpipt())
                asm volatile("ic ialluis");

        dsb(ish);
}