xref: /OK3568_Linux_fs/kernel/arch/arm64/kvm/mmu.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
4*4882a593Smuzhiyun  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #include <linux/mman.h>
8*4882a593Smuzhiyun #include <linux/kvm_host.h>
9*4882a593Smuzhiyun #include <linux/io.h>
10*4882a593Smuzhiyun #include <linux/hugetlb.h>
11*4882a593Smuzhiyun #include <linux/sched/signal.h>
12*4882a593Smuzhiyun #include <trace/events/kvm.h>
13*4882a593Smuzhiyun #include <asm/pgalloc.h>
14*4882a593Smuzhiyun #include <asm/cacheflush.h>
15*4882a593Smuzhiyun #include <asm/kvm_arm.h>
16*4882a593Smuzhiyun #include <asm/kvm_mmu.h>
17*4882a593Smuzhiyun #include <asm/kvm_pgtable.h>
18*4882a593Smuzhiyun #include <asm/kvm_ras.h>
19*4882a593Smuzhiyun #include <asm/kvm_asm.h>
20*4882a593Smuzhiyun #include <asm/kvm_emulate.h>
21*4882a593Smuzhiyun #include <asm/virt.h>
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun #include "trace.h"
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun static struct kvm_pgtable *hyp_pgtable;
26*4882a593Smuzhiyun static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun static unsigned long hyp_idmap_start;
29*4882a593Smuzhiyun static unsigned long hyp_idmap_end;
30*4882a593Smuzhiyun static phys_addr_t hyp_idmap_vector;
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun static unsigned long io_map_base;
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun /*
36*4882a593Smuzhiyun  * Release kvm_mmu_lock periodically if the memory region is large. Otherwise,
37*4882a593Smuzhiyun  * we may see kernel panics with CONFIG_DETECT_HUNG_TASK,
38*4882a593Smuzhiyun  * CONFIG_LOCKUP_DETECTOR, CONFIG_LOCKDEP. Additionally, holding the lock too
39*4882a593Smuzhiyun  * long will also starve other vCPUs. We have to also make sure that the page
40*4882a593Smuzhiyun  * tables are not freed while we released the lock.
41*4882a593Smuzhiyun  */
stage2_apply_range(struct kvm * kvm,phys_addr_t addr,phys_addr_t end,int (* fn)(struct kvm_pgtable *,u64,u64),bool resched)42*4882a593Smuzhiyun static int stage2_apply_range(struct kvm *kvm, phys_addr_t addr,
43*4882a593Smuzhiyun 			      phys_addr_t end,
44*4882a593Smuzhiyun 			      int (*fn)(struct kvm_pgtable *, u64, u64),
45*4882a593Smuzhiyun 			      bool resched)
46*4882a593Smuzhiyun {
47*4882a593Smuzhiyun 	int ret;
48*4882a593Smuzhiyun 	u64 next;
49*4882a593Smuzhiyun 
50*4882a593Smuzhiyun 	do {
51*4882a593Smuzhiyun 		struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
52*4882a593Smuzhiyun 		if (!pgt)
53*4882a593Smuzhiyun 			return -EINVAL;
54*4882a593Smuzhiyun 
55*4882a593Smuzhiyun 		next = stage2_pgd_addr_end(kvm, addr, end);
56*4882a593Smuzhiyun 		ret = fn(pgt, addr, next - addr);
57*4882a593Smuzhiyun 		if (ret)
58*4882a593Smuzhiyun 			break;
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun 		if (resched && next != end)
61*4882a593Smuzhiyun 			cond_resched_lock(&kvm->mmu_lock);
62*4882a593Smuzhiyun 	} while (addr = next, addr != end);
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun 	return ret;
65*4882a593Smuzhiyun }
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun #define stage2_apply_range_resched(kvm, addr, end, fn)			\
68*4882a593Smuzhiyun 	stage2_apply_range(kvm, addr, end, fn, true)
69*4882a593Smuzhiyun 
memslot_is_logging(struct kvm_memory_slot * memslot)70*4882a593Smuzhiyun static bool memslot_is_logging(struct kvm_memory_slot *memslot)
71*4882a593Smuzhiyun {
72*4882a593Smuzhiyun 	return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
73*4882a593Smuzhiyun }
74*4882a593Smuzhiyun 
75*4882a593Smuzhiyun /**
76*4882a593Smuzhiyun  * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
77*4882a593Smuzhiyun  * @kvm:	pointer to kvm structure.
78*4882a593Smuzhiyun  *
79*4882a593Smuzhiyun  * Interface to HYP function to flush all VM TLB entries
80*4882a593Smuzhiyun  */
kvm_flush_remote_tlbs(struct kvm * kvm)81*4882a593Smuzhiyun void kvm_flush_remote_tlbs(struct kvm *kvm)
82*4882a593Smuzhiyun {
83*4882a593Smuzhiyun 	kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
84*4882a593Smuzhiyun }
85*4882a593Smuzhiyun 
kvm_is_device_pfn(unsigned long pfn)86*4882a593Smuzhiyun static bool kvm_is_device_pfn(unsigned long pfn)
87*4882a593Smuzhiyun {
88*4882a593Smuzhiyun 	return !pfn_valid(pfn);
89*4882a593Smuzhiyun }
90*4882a593Smuzhiyun 
stage2_memcache_zalloc_page(void * arg)91*4882a593Smuzhiyun static void *stage2_memcache_zalloc_page(void *arg)
92*4882a593Smuzhiyun {
93*4882a593Smuzhiyun 	struct kvm_mmu_memory_cache *mc = arg;
94*4882a593Smuzhiyun 
95*4882a593Smuzhiyun 	/* Allocated with __GFP_ZERO, so no need to zero */
96*4882a593Smuzhiyun 	return kvm_mmu_memory_cache_alloc(mc);
97*4882a593Smuzhiyun }
98*4882a593Smuzhiyun 
kvm_host_zalloc_pages_exact(size_t size)99*4882a593Smuzhiyun static void *kvm_host_zalloc_pages_exact(size_t size)
100*4882a593Smuzhiyun {
101*4882a593Smuzhiyun 	return alloc_pages_exact(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
102*4882a593Smuzhiyun }
103*4882a593Smuzhiyun 
kvm_host_get_page(void * addr)104*4882a593Smuzhiyun static void kvm_host_get_page(void *addr)
105*4882a593Smuzhiyun {
106*4882a593Smuzhiyun 	get_page(virt_to_page(addr));
107*4882a593Smuzhiyun }
108*4882a593Smuzhiyun 
kvm_host_put_page(void * addr)109*4882a593Smuzhiyun static void kvm_host_put_page(void *addr)
110*4882a593Smuzhiyun {
111*4882a593Smuzhiyun 	put_page(virt_to_page(addr));
112*4882a593Smuzhiyun }
113*4882a593Smuzhiyun 
kvm_host_page_count(void * addr)114*4882a593Smuzhiyun static int kvm_host_page_count(void *addr)
115*4882a593Smuzhiyun {
116*4882a593Smuzhiyun 	return page_count(virt_to_page(addr));
117*4882a593Smuzhiyun }
118*4882a593Smuzhiyun 
kvm_host_pa(void * addr)119*4882a593Smuzhiyun static phys_addr_t kvm_host_pa(void *addr)
120*4882a593Smuzhiyun {
121*4882a593Smuzhiyun 	return __pa(addr);
122*4882a593Smuzhiyun }
123*4882a593Smuzhiyun 
kvm_host_va(phys_addr_t phys)124*4882a593Smuzhiyun static void *kvm_host_va(phys_addr_t phys)
125*4882a593Smuzhiyun {
126*4882a593Smuzhiyun 	return __va(phys);
127*4882a593Smuzhiyun }
128*4882a593Smuzhiyun 
129*4882a593Smuzhiyun /*
130*4882a593Smuzhiyun  * Unmapping vs dcache management:
131*4882a593Smuzhiyun  *
132*4882a593Smuzhiyun  * If a guest maps certain memory pages as uncached, all writes will
133*4882a593Smuzhiyun  * bypass the data cache and go directly to RAM.  However, the CPUs
134*4882a593Smuzhiyun  * can still speculate reads (not writes) and fill cache lines with
135*4882a593Smuzhiyun  * data.
136*4882a593Smuzhiyun  *
137*4882a593Smuzhiyun  * Those cache lines will be *clean* cache lines though, so a
138*4882a593Smuzhiyun  * clean+invalidate operation is equivalent to an invalidate
139*4882a593Smuzhiyun  * operation, because no cache lines are marked dirty.
140*4882a593Smuzhiyun  *
141*4882a593Smuzhiyun  * Those clean cache lines could be filled prior to an uncached write
142*4882a593Smuzhiyun  * by the guest, and the cache coherent IO subsystem would therefore
143*4882a593Smuzhiyun  * end up writing old data to disk.
144*4882a593Smuzhiyun  *
145*4882a593Smuzhiyun  * This is why right after unmapping a page/section and invalidating
146*4882a593Smuzhiyun  * the corresponding TLBs, we flush to make sure the IO subsystem will
147*4882a593Smuzhiyun  * never hit in the cache.
148*4882a593Smuzhiyun  *
149*4882a593Smuzhiyun  * This is all avoided on systems that have ARM64_HAS_STAGE2_FWB, as
150*4882a593Smuzhiyun  * we then fully enforce cacheability of RAM, no matter what the guest
151*4882a593Smuzhiyun  * does.
152*4882a593Smuzhiyun  */
153*4882a593Smuzhiyun /**
154*4882a593Smuzhiyun  * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
155*4882a593Smuzhiyun  * @mmu:   The KVM stage-2 MMU pointer
156*4882a593Smuzhiyun  * @start: The intermediate physical base address of the range to unmap
157*4882a593Smuzhiyun  * @size:  The size of the area to unmap
158*4882a593Smuzhiyun  * @may_block: Whether or not we are permitted to block
159*4882a593Smuzhiyun  *
160*4882a593Smuzhiyun  * Clear a range of stage-2 mappings, lowering the various ref-counts.  Must
161*4882a593Smuzhiyun  * be called while holding mmu_lock (unless for freeing the stage2 pgd before
162*4882a593Smuzhiyun  * destroying the VM), otherwise another faulting VCPU may come in and mess
163*4882a593Smuzhiyun  * with things behind our backs.
164*4882a593Smuzhiyun  */
__unmap_stage2_range(struct kvm_s2_mmu * mmu,phys_addr_t start,u64 size,bool may_block)165*4882a593Smuzhiyun static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
166*4882a593Smuzhiyun 				 bool may_block)
167*4882a593Smuzhiyun {
168*4882a593Smuzhiyun 	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
169*4882a593Smuzhiyun 	phys_addr_t end = start + size;
170*4882a593Smuzhiyun 
171*4882a593Smuzhiyun 	assert_spin_locked(&kvm->mmu_lock);
172*4882a593Smuzhiyun 	WARN_ON(size & ~PAGE_MASK);
173*4882a593Smuzhiyun 	WARN_ON(stage2_apply_range(kvm, start, end, kvm_pgtable_stage2_unmap,
174*4882a593Smuzhiyun 				   may_block));
175*4882a593Smuzhiyun }
176*4882a593Smuzhiyun 
unmap_stage2_range(struct kvm_s2_mmu * mmu,phys_addr_t start,u64 size)177*4882a593Smuzhiyun static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
178*4882a593Smuzhiyun {
179*4882a593Smuzhiyun 	__unmap_stage2_range(mmu, start, size, true);
180*4882a593Smuzhiyun }
181*4882a593Smuzhiyun 
stage2_flush_memslot(struct kvm * kvm,struct kvm_memory_slot * memslot)182*4882a593Smuzhiyun static void stage2_flush_memslot(struct kvm *kvm,
183*4882a593Smuzhiyun 				 struct kvm_memory_slot *memslot)
184*4882a593Smuzhiyun {
185*4882a593Smuzhiyun 	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
186*4882a593Smuzhiyun 	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
187*4882a593Smuzhiyun 
188*4882a593Smuzhiyun 	stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_flush);
189*4882a593Smuzhiyun }
190*4882a593Smuzhiyun 
191*4882a593Smuzhiyun /**
192*4882a593Smuzhiyun  * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
193*4882a593Smuzhiyun  * @kvm: The struct kvm pointer
194*4882a593Smuzhiyun  *
195*4882a593Smuzhiyun  * Go through the stage 2 page tables and invalidate any cache lines
196*4882a593Smuzhiyun  * backing memory already mapped to the VM.
197*4882a593Smuzhiyun  */
stage2_flush_vm(struct kvm * kvm)198*4882a593Smuzhiyun static void stage2_flush_vm(struct kvm *kvm)
199*4882a593Smuzhiyun {
200*4882a593Smuzhiyun 	struct kvm_memslots *slots;
201*4882a593Smuzhiyun 	struct kvm_memory_slot *memslot;
202*4882a593Smuzhiyun 	int idx;
203*4882a593Smuzhiyun 
204*4882a593Smuzhiyun 	idx = srcu_read_lock(&kvm->srcu);
205*4882a593Smuzhiyun 	spin_lock(&kvm->mmu_lock);
206*4882a593Smuzhiyun 
207*4882a593Smuzhiyun 	slots = kvm_memslots(kvm);
208*4882a593Smuzhiyun 	kvm_for_each_memslot(memslot, slots)
209*4882a593Smuzhiyun 		stage2_flush_memslot(kvm, memslot);
210*4882a593Smuzhiyun 
211*4882a593Smuzhiyun 	spin_unlock(&kvm->mmu_lock);
212*4882a593Smuzhiyun 	srcu_read_unlock(&kvm->srcu, idx);
213*4882a593Smuzhiyun }
214*4882a593Smuzhiyun 
215*4882a593Smuzhiyun /**
216*4882a593Smuzhiyun  * free_hyp_pgds - free Hyp-mode page tables
217*4882a593Smuzhiyun  */
free_hyp_pgds(void)218*4882a593Smuzhiyun void free_hyp_pgds(void)
219*4882a593Smuzhiyun {
220*4882a593Smuzhiyun 	mutex_lock(&kvm_hyp_pgd_mutex);
221*4882a593Smuzhiyun 	if (hyp_pgtable) {
222*4882a593Smuzhiyun 		kvm_pgtable_hyp_destroy(hyp_pgtable);
223*4882a593Smuzhiyun 		kfree(hyp_pgtable);
224*4882a593Smuzhiyun 		hyp_pgtable = NULL;
225*4882a593Smuzhiyun 	}
226*4882a593Smuzhiyun 	mutex_unlock(&kvm_hyp_pgd_mutex);
227*4882a593Smuzhiyun }
228*4882a593Smuzhiyun 
kvm_host_owns_hyp_mappings(void)229*4882a593Smuzhiyun static bool kvm_host_owns_hyp_mappings(void)
230*4882a593Smuzhiyun {
231*4882a593Smuzhiyun 	if (static_branch_likely(&kvm_protected_mode_initialized))
232*4882a593Smuzhiyun 		return false;
233*4882a593Smuzhiyun 
234*4882a593Smuzhiyun 	/*
235*4882a593Smuzhiyun 	 * This can happen at boot time when __create_hyp_mappings() is called
236*4882a593Smuzhiyun 	 * after the hyp protection has been enabled, but the static key has
237*4882a593Smuzhiyun 	 * not been flipped yet.
238*4882a593Smuzhiyun 	 */
239*4882a593Smuzhiyun 	if (!hyp_pgtable && is_protected_kvm_enabled())
240*4882a593Smuzhiyun 		return false;
241*4882a593Smuzhiyun 
242*4882a593Smuzhiyun 	WARN_ON(!hyp_pgtable);
243*4882a593Smuzhiyun 
244*4882a593Smuzhiyun 	return true;
245*4882a593Smuzhiyun }
246*4882a593Smuzhiyun 
__create_hyp_mappings(unsigned long start,unsigned long size,unsigned long phys,enum kvm_pgtable_prot prot)247*4882a593Smuzhiyun static int __create_hyp_mappings(unsigned long start, unsigned long size,
248*4882a593Smuzhiyun 				 unsigned long phys, enum kvm_pgtable_prot prot)
249*4882a593Smuzhiyun {
250*4882a593Smuzhiyun 	int err;
251*4882a593Smuzhiyun 
252*4882a593Smuzhiyun 	if (!kvm_host_owns_hyp_mappings()) {
253*4882a593Smuzhiyun 		return kvm_call_hyp_nvhe(__pkvm_create_mappings,
254*4882a593Smuzhiyun 					 start, size, phys, prot);
255*4882a593Smuzhiyun 	}
256*4882a593Smuzhiyun 
257*4882a593Smuzhiyun 	mutex_lock(&kvm_hyp_pgd_mutex);
258*4882a593Smuzhiyun 	err = kvm_pgtable_hyp_map(hyp_pgtable, start, size, phys, prot);
259*4882a593Smuzhiyun 	mutex_unlock(&kvm_hyp_pgd_mutex);
260*4882a593Smuzhiyun 
261*4882a593Smuzhiyun 	return err;
262*4882a593Smuzhiyun }
263*4882a593Smuzhiyun 
kvm_kaddr_to_phys(void * kaddr)264*4882a593Smuzhiyun static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
265*4882a593Smuzhiyun {
266*4882a593Smuzhiyun 	if (!is_vmalloc_addr(kaddr)) {
267*4882a593Smuzhiyun 		BUG_ON(!virt_addr_valid(kaddr));
268*4882a593Smuzhiyun 		return __pa(kaddr);
269*4882a593Smuzhiyun 	} else {
270*4882a593Smuzhiyun 		return page_to_phys(vmalloc_to_page(kaddr)) +
271*4882a593Smuzhiyun 		       offset_in_page(kaddr);
272*4882a593Smuzhiyun 	}
273*4882a593Smuzhiyun }
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun /**
276*4882a593Smuzhiyun  * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
277*4882a593Smuzhiyun  * @from:	The virtual kernel start address of the range
278*4882a593Smuzhiyun  * @to:		The virtual kernel end address of the range (exclusive)
279*4882a593Smuzhiyun  * @prot:	The protection to be applied to this range
280*4882a593Smuzhiyun  *
281*4882a593Smuzhiyun  * The same virtual address as the kernel virtual address is also used
282*4882a593Smuzhiyun  * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
283*4882a593Smuzhiyun  * physical pages.
284*4882a593Smuzhiyun  */
create_hyp_mappings(void * from,void * to,enum kvm_pgtable_prot prot)285*4882a593Smuzhiyun int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
286*4882a593Smuzhiyun {
287*4882a593Smuzhiyun 	phys_addr_t phys_addr;
288*4882a593Smuzhiyun 	unsigned long virt_addr;
289*4882a593Smuzhiyun 	unsigned long start = kern_hyp_va((unsigned long)from);
290*4882a593Smuzhiyun 	unsigned long end = kern_hyp_va((unsigned long)to);
291*4882a593Smuzhiyun 
292*4882a593Smuzhiyun 	if (is_kernel_in_hyp_mode())
293*4882a593Smuzhiyun 		return 0;
294*4882a593Smuzhiyun 
295*4882a593Smuzhiyun 	start = start & PAGE_MASK;
296*4882a593Smuzhiyun 	end = PAGE_ALIGN(end);
297*4882a593Smuzhiyun 
298*4882a593Smuzhiyun 	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
299*4882a593Smuzhiyun 		int err;
300*4882a593Smuzhiyun 
301*4882a593Smuzhiyun 		phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
302*4882a593Smuzhiyun 		err = __create_hyp_mappings(virt_addr, PAGE_SIZE, phys_addr,
303*4882a593Smuzhiyun 					    prot);
304*4882a593Smuzhiyun 		if (err)
305*4882a593Smuzhiyun 			return err;
306*4882a593Smuzhiyun 	}
307*4882a593Smuzhiyun 
308*4882a593Smuzhiyun 	return 0;
309*4882a593Smuzhiyun }
310*4882a593Smuzhiyun 
__create_hyp_private_mapping(phys_addr_t phys_addr,size_t size,unsigned long * haddr,enum kvm_pgtable_prot prot)311*4882a593Smuzhiyun static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
312*4882a593Smuzhiyun 					unsigned long *haddr,
313*4882a593Smuzhiyun 					enum kvm_pgtable_prot prot)
314*4882a593Smuzhiyun {
315*4882a593Smuzhiyun 	unsigned long base;
316*4882a593Smuzhiyun 	int ret = 0;
317*4882a593Smuzhiyun 
318*4882a593Smuzhiyun 	if (!kvm_host_owns_hyp_mappings()) {
319*4882a593Smuzhiyun 		base = kvm_call_hyp_nvhe(__pkvm_create_private_mapping,
320*4882a593Smuzhiyun 					 phys_addr, size, prot);
321*4882a593Smuzhiyun 		if (IS_ERR_OR_NULL((void *)base))
322*4882a593Smuzhiyun 			return PTR_ERR((void *)base);
323*4882a593Smuzhiyun 		*haddr = base;
324*4882a593Smuzhiyun 
325*4882a593Smuzhiyun 		return 0;
326*4882a593Smuzhiyun 	}
327*4882a593Smuzhiyun 
328*4882a593Smuzhiyun 	mutex_lock(&kvm_hyp_pgd_mutex);
329*4882a593Smuzhiyun 
330*4882a593Smuzhiyun 	/*
331*4882a593Smuzhiyun 	 * This assumes that we have enough space below the idmap
332*4882a593Smuzhiyun 	 * page to allocate our VAs. If not, the check below will
333*4882a593Smuzhiyun 	 * kick. A potential alternative would be to detect that
334*4882a593Smuzhiyun 	 * overflow and switch to an allocation above the idmap.
335*4882a593Smuzhiyun 	 *
336*4882a593Smuzhiyun 	 * The allocated size is always a multiple of PAGE_SIZE.
337*4882a593Smuzhiyun 	 */
338*4882a593Smuzhiyun 	size = PAGE_ALIGN(size + offset_in_page(phys_addr));
339*4882a593Smuzhiyun 	base = io_map_base - size;
340*4882a593Smuzhiyun 
341*4882a593Smuzhiyun 	/*
342*4882a593Smuzhiyun 	 * Verify that BIT(VA_BITS - 1) hasn't been flipped by
343*4882a593Smuzhiyun 	 * allocating the new area, as it would indicate we've
344*4882a593Smuzhiyun 	 * overflowed the idmap/IO address range.
345*4882a593Smuzhiyun 	 */
346*4882a593Smuzhiyun 	if ((base ^ io_map_base) & BIT(VA_BITS - 1))
347*4882a593Smuzhiyun 		ret = -ENOMEM;
348*4882a593Smuzhiyun 	else
349*4882a593Smuzhiyun 		io_map_base = base;
350*4882a593Smuzhiyun 
351*4882a593Smuzhiyun 	mutex_unlock(&kvm_hyp_pgd_mutex);
352*4882a593Smuzhiyun 
353*4882a593Smuzhiyun 	if (ret)
354*4882a593Smuzhiyun 		goto out;
355*4882a593Smuzhiyun 
356*4882a593Smuzhiyun 	ret = __create_hyp_mappings(base, size, phys_addr, prot);
357*4882a593Smuzhiyun 	if (ret)
358*4882a593Smuzhiyun 		goto out;
359*4882a593Smuzhiyun 
360*4882a593Smuzhiyun 	*haddr = base + offset_in_page(phys_addr);
361*4882a593Smuzhiyun out:
362*4882a593Smuzhiyun 	return ret;
363*4882a593Smuzhiyun }
364*4882a593Smuzhiyun 
365*4882a593Smuzhiyun /**
366*4882a593Smuzhiyun  * create_hyp_io_mappings - Map IO into both kernel and HYP
367*4882a593Smuzhiyun  * @phys_addr:	The physical start address which gets mapped
368*4882a593Smuzhiyun  * @size:	Size of the region being mapped
369*4882a593Smuzhiyun  * @kaddr:	Kernel VA for this mapping
370*4882a593Smuzhiyun  * @haddr:	HYP VA for this mapping
371*4882a593Smuzhiyun  */
create_hyp_io_mappings(phys_addr_t phys_addr,size_t size,void __iomem ** kaddr,void __iomem ** haddr)372*4882a593Smuzhiyun int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
373*4882a593Smuzhiyun 			   void __iomem **kaddr,
374*4882a593Smuzhiyun 			   void __iomem **haddr)
375*4882a593Smuzhiyun {
376*4882a593Smuzhiyun 	unsigned long addr;
377*4882a593Smuzhiyun 	int ret;
378*4882a593Smuzhiyun 
379*4882a593Smuzhiyun 	*kaddr = ioremap(phys_addr, size);
380*4882a593Smuzhiyun 	if (!*kaddr)
381*4882a593Smuzhiyun 		return -ENOMEM;
382*4882a593Smuzhiyun 
383*4882a593Smuzhiyun 	if (is_kernel_in_hyp_mode()) {
384*4882a593Smuzhiyun 		*haddr = *kaddr;
385*4882a593Smuzhiyun 		return 0;
386*4882a593Smuzhiyun 	}
387*4882a593Smuzhiyun 
388*4882a593Smuzhiyun 	ret = __create_hyp_private_mapping(phys_addr, size,
389*4882a593Smuzhiyun 					   &addr, PAGE_HYP_DEVICE);
390*4882a593Smuzhiyun 	if (ret) {
391*4882a593Smuzhiyun 		iounmap(*kaddr);
392*4882a593Smuzhiyun 		*kaddr = NULL;
393*4882a593Smuzhiyun 		*haddr = NULL;
394*4882a593Smuzhiyun 		return ret;
395*4882a593Smuzhiyun 	}
396*4882a593Smuzhiyun 
397*4882a593Smuzhiyun 	*haddr = (void __iomem *)addr;
398*4882a593Smuzhiyun 	return 0;
399*4882a593Smuzhiyun }
400*4882a593Smuzhiyun 
401*4882a593Smuzhiyun /**
402*4882a593Smuzhiyun  * create_hyp_exec_mappings - Map an executable range into HYP
403*4882a593Smuzhiyun  * @phys_addr:	The physical start address which gets mapped
404*4882a593Smuzhiyun  * @size:	Size of the region being mapped
405*4882a593Smuzhiyun  * @haddr:	HYP VA for this mapping
406*4882a593Smuzhiyun  */
create_hyp_exec_mappings(phys_addr_t phys_addr,size_t size,void ** haddr)407*4882a593Smuzhiyun int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
408*4882a593Smuzhiyun 			     void **haddr)
409*4882a593Smuzhiyun {
410*4882a593Smuzhiyun 	unsigned long addr;
411*4882a593Smuzhiyun 	int ret;
412*4882a593Smuzhiyun 
413*4882a593Smuzhiyun 	BUG_ON(is_kernel_in_hyp_mode());
414*4882a593Smuzhiyun 
415*4882a593Smuzhiyun 	ret = __create_hyp_private_mapping(phys_addr, size,
416*4882a593Smuzhiyun 					   &addr, PAGE_HYP_EXEC);
417*4882a593Smuzhiyun 	if (ret) {
418*4882a593Smuzhiyun 		*haddr = NULL;
419*4882a593Smuzhiyun 		return ret;
420*4882a593Smuzhiyun 	}
421*4882a593Smuzhiyun 
422*4882a593Smuzhiyun 	*haddr = (void *)addr;
423*4882a593Smuzhiyun 	return 0;
424*4882a593Smuzhiyun }
425*4882a593Smuzhiyun 
426*4882a593Smuzhiyun static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
427*4882a593Smuzhiyun 	.zalloc_page		= stage2_memcache_zalloc_page,
428*4882a593Smuzhiyun 	.zalloc_pages_exact	= kvm_host_zalloc_pages_exact,
429*4882a593Smuzhiyun 	.free_pages_exact	= free_pages_exact,
430*4882a593Smuzhiyun 	.get_page		= kvm_host_get_page,
431*4882a593Smuzhiyun 	.put_page		= kvm_host_put_page,
432*4882a593Smuzhiyun 	.page_count		= kvm_host_page_count,
433*4882a593Smuzhiyun 	.phys_to_virt		= kvm_host_va,
434*4882a593Smuzhiyun 	.virt_to_phys		= kvm_host_pa,
435*4882a593Smuzhiyun };
436*4882a593Smuzhiyun 
437*4882a593Smuzhiyun /**
438*4882a593Smuzhiyun  * kvm_init_stage2_mmu - Initialise a S2 MMU strucrure
439*4882a593Smuzhiyun  * @kvm:	The pointer to the KVM structure
440*4882a593Smuzhiyun  * @mmu:	The pointer to the s2 MMU structure
441*4882a593Smuzhiyun  *
442*4882a593Smuzhiyun  * Allocates only the stage-2 HW PGD level table(s).
443*4882a593Smuzhiyun  * Note we don't need locking here as this is only called when the VM is
444*4882a593Smuzhiyun  * created, which can only be done once.
445*4882a593Smuzhiyun  */
kvm_init_stage2_mmu(struct kvm * kvm,struct kvm_s2_mmu * mmu)446*4882a593Smuzhiyun int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
447*4882a593Smuzhiyun {
448*4882a593Smuzhiyun 	int cpu, err;
449*4882a593Smuzhiyun 	struct kvm_pgtable *pgt;
450*4882a593Smuzhiyun 
451*4882a593Smuzhiyun 	if (mmu->pgt != NULL) {
452*4882a593Smuzhiyun 		kvm_err("kvm_arch already initialized?\n");
453*4882a593Smuzhiyun 		return -EINVAL;
454*4882a593Smuzhiyun 	}
455*4882a593Smuzhiyun 
456*4882a593Smuzhiyun 	pgt = kzalloc(sizeof(*pgt), GFP_KERNEL);
457*4882a593Smuzhiyun 	if (!pgt)
458*4882a593Smuzhiyun 		return -ENOMEM;
459*4882a593Smuzhiyun 
460*4882a593Smuzhiyun 	err = kvm_pgtable_stage2_init(pgt, &kvm->arch, &kvm_s2_mm_ops);
461*4882a593Smuzhiyun 	if (err)
462*4882a593Smuzhiyun 		goto out_free_pgtable;
463*4882a593Smuzhiyun 
464*4882a593Smuzhiyun 	mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
465*4882a593Smuzhiyun 	if (!mmu->last_vcpu_ran) {
466*4882a593Smuzhiyun 		err = -ENOMEM;
467*4882a593Smuzhiyun 		goto out_destroy_pgtable;
468*4882a593Smuzhiyun 	}
469*4882a593Smuzhiyun 
470*4882a593Smuzhiyun 	for_each_possible_cpu(cpu)
471*4882a593Smuzhiyun 		*per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;
472*4882a593Smuzhiyun 
473*4882a593Smuzhiyun 	mmu->arch = &kvm->arch;
474*4882a593Smuzhiyun 	mmu->pgt = pgt;
475*4882a593Smuzhiyun 	mmu->pgd_phys = __pa(pgt->pgd);
476*4882a593Smuzhiyun 	mmu->vmid.vmid_gen = 0;
477*4882a593Smuzhiyun 	return 0;
478*4882a593Smuzhiyun 
479*4882a593Smuzhiyun out_destroy_pgtable:
480*4882a593Smuzhiyun 	kvm_pgtable_stage2_destroy(pgt);
481*4882a593Smuzhiyun out_free_pgtable:
482*4882a593Smuzhiyun 	kfree(pgt);
483*4882a593Smuzhiyun 	return err;
484*4882a593Smuzhiyun }
485*4882a593Smuzhiyun 
stage2_unmap_memslot(struct kvm * kvm,struct kvm_memory_slot * memslot)486*4882a593Smuzhiyun static void stage2_unmap_memslot(struct kvm *kvm,
487*4882a593Smuzhiyun 				 struct kvm_memory_slot *memslot)
488*4882a593Smuzhiyun {
489*4882a593Smuzhiyun 	hva_t hva = memslot->userspace_addr;
490*4882a593Smuzhiyun 	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
491*4882a593Smuzhiyun 	phys_addr_t size = PAGE_SIZE * memslot->npages;
492*4882a593Smuzhiyun 	hva_t reg_end = hva + size;
493*4882a593Smuzhiyun 
494*4882a593Smuzhiyun 	/*
495*4882a593Smuzhiyun 	 * A memory region could potentially cover multiple VMAs, and any holes
496*4882a593Smuzhiyun 	 * between them, so iterate over all of them to find out if we should
497*4882a593Smuzhiyun 	 * unmap any of them.
498*4882a593Smuzhiyun 	 *
499*4882a593Smuzhiyun 	 *     +--------------------------------------------+
500*4882a593Smuzhiyun 	 * +---------------+----------------+   +----------------+
501*4882a593Smuzhiyun 	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
502*4882a593Smuzhiyun 	 * +---------------+----------------+   +----------------+
503*4882a593Smuzhiyun 	 *     |               memory region                |
504*4882a593Smuzhiyun 	 *     +--------------------------------------------+
505*4882a593Smuzhiyun 	 */
506*4882a593Smuzhiyun 	do {
507*4882a593Smuzhiyun 		struct vm_area_struct *vma = find_vma(current->mm, hva);
508*4882a593Smuzhiyun 		hva_t vm_start, vm_end;
509*4882a593Smuzhiyun 
510*4882a593Smuzhiyun 		if (!vma || vma->vm_start >= reg_end)
511*4882a593Smuzhiyun 			break;
512*4882a593Smuzhiyun 
513*4882a593Smuzhiyun 		/*
514*4882a593Smuzhiyun 		 * Take the intersection of this VMA with the memory region
515*4882a593Smuzhiyun 		 */
516*4882a593Smuzhiyun 		vm_start = max(hva, vma->vm_start);
517*4882a593Smuzhiyun 		vm_end = min(reg_end, vma->vm_end);
518*4882a593Smuzhiyun 
519*4882a593Smuzhiyun 		if (!(vma->vm_flags & VM_PFNMAP)) {
520*4882a593Smuzhiyun 			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
521*4882a593Smuzhiyun 			unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
522*4882a593Smuzhiyun 		}
523*4882a593Smuzhiyun 		hva = vm_end;
524*4882a593Smuzhiyun 	} while (hva < reg_end);
525*4882a593Smuzhiyun }
526*4882a593Smuzhiyun 
527*4882a593Smuzhiyun /**
528*4882a593Smuzhiyun  * stage2_unmap_vm - Unmap Stage-2 RAM mappings
529*4882a593Smuzhiyun  * @kvm: The struct kvm pointer
530*4882a593Smuzhiyun  *
531*4882a593Smuzhiyun  * Go through the memregions and unmap any regular RAM
532*4882a593Smuzhiyun  * backing memory already mapped to the VM.
533*4882a593Smuzhiyun  */
stage2_unmap_vm(struct kvm * kvm)534*4882a593Smuzhiyun void stage2_unmap_vm(struct kvm *kvm)
535*4882a593Smuzhiyun {
536*4882a593Smuzhiyun 	struct kvm_memslots *slots;
537*4882a593Smuzhiyun 	struct kvm_memory_slot *memslot;
538*4882a593Smuzhiyun 	int idx;
539*4882a593Smuzhiyun 
540*4882a593Smuzhiyun 	idx = srcu_read_lock(&kvm->srcu);
541*4882a593Smuzhiyun 	mmap_read_lock(current->mm);
542*4882a593Smuzhiyun 	spin_lock(&kvm->mmu_lock);
543*4882a593Smuzhiyun 
544*4882a593Smuzhiyun 	slots = kvm_memslots(kvm);
545*4882a593Smuzhiyun 	kvm_for_each_memslot(memslot, slots)
546*4882a593Smuzhiyun 		stage2_unmap_memslot(kvm, memslot);
547*4882a593Smuzhiyun 
548*4882a593Smuzhiyun 	spin_unlock(&kvm->mmu_lock);
549*4882a593Smuzhiyun 	mmap_read_unlock(current->mm);
550*4882a593Smuzhiyun 	srcu_read_unlock(&kvm->srcu, idx);
551*4882a593Smuzhiyun }
552*4882a593Smuzhiyun 
kvm_free_stage2_pgd(struct kvm_s2_mmu * mmu)553*4882a593Smuzhiyun void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
554*4882a593Smuzhiyun {
555*4882a593Smuzhiyun 	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
556*4882a593Smuzhiyun 	struct kvm_pgtable *pgt = NULL;
557*4882a593Smuzhiyun 
558*4882a593Smuzhiyun 	spin_lock(&kvm->mmu_lock);
559*4882a593Smuzhiyun 	pgt = mmu->pgt;
560*4882a593Smuzhiyun 	if (pgt) {
561*4882a593Smuzhiyun 		mmu->pgd_phys = 0;
562*4882a593Smuzhiyun 		mmu->pgt = NULL;
563*4882a593Smuzhiyun 		free_percpu(mmu->last_vcpu_ran);
564*4882a593Smuzhiyun 	}
565*4882a593Smuzhiyun 	spin_unlock(&kvm->mmu_lock);
566*4882a593Smuzhiyun 
567*4882a593Smuzhiyun 	if (pgt) {
568*4882a593Smuzhiyun 		kvm_pgtable_stage2_destroy(pgt);
569*4882a593Smuzhiyun 		kfree(pgt);
570*4882a593Smuzhiyun 	}
571*4882a593Smuzhiyun }
572*4882a593Smuzhiyun 
573*4882a593Smuzhiyun /**
574*4882a593Smuzhiyun  * kvm_phys_addr_ioremap - map a device range to guest IPA
575*4882a593Smuzhiyun  *
576*4882a593Smuzhiyun  * @kvm:	The KVM pointer
577*4882a593Smuzhiyun  * @guest_ipa:	The IPA at which to insert the mapping
578*4882a593Smuzhiyun  * @pa:		The physical address of the device
579*4882a593Smuzhiyun  * @size:	The size of the mapping
580*4882a593Smuzhiyun  * @writable:   Whether or not to create a writable mapping
581*4882a593Smuzhiyun  */
kvm_phys_addr_ioremap(struct kvm * kvm,phys_addr_t guest_ipa,phys_addr_t pa,unsigned long size,bool writable)582*4882a593Smuzhiyun int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
583*4882a593Smuzhiyun 			  phys_addr_t pa, unsigned long size, bool writable)
584*4882a593Smuzhiyun {
585*4882a593Smuzhiyun 	phys_addr_t addr;
586*4882a593Smuzhiyun 	int ret = 0;
587*4882a593Smuzhiyun 	struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, NULL, };
588*4882a593Smuzhiyun 	struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
589*4882a593Smuzhiyun 	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
590*4882a593Smuzhiyun 				     KVM_PGTABLE_PROT_R |
591*4882a593Smuzhiyun 				     (writable ? KVM_PGTABLE_PROT_W : 0);
592*4882a593Smuzhiyun 
593*4882a593Smuzhiyun 	size += offset_in_page(guest_ipa);
594*4882a593Smuzhiyun 	guest_ipa &= PAGE_MASK;
595*4882a593Smuzhiyun 
596*4882a593Smuzhiyun 	for (addr = guest_ipa; addr < guest_ipa + size; addr += PAGE_SIZE) {
597*4882a593Smuzhiyun 		ret = kvm_mmu_topup_memory_cache(&cache,
598*4882a593Smuzhiyun 						 kvm_mmu_cache_min_pages(kvm));
599*4882a593Smuzhiyun 		if (ret)
600*4882a593Smuzhiyun 			break;
601*4882a593Smuzhiyun 
602*4882a593Smuzhiyun 		spin_lock(&kvm->mmu_lock);
603*4882a593Smuzhiyun 		ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot,
604*4882a593Smuzhiyun 					     &cache);
605*4882a593Smuzhiyun 		spin_unlock(&kvm->mmu_lock);
606*4882a593Smuzhiyun 		if (ret)
607*4882a593Smuzhiyun 			break;
608*4882a593Smuzhiyun 
609*4882a593Smuzhiyun 		pa += PAGE_SIZE;
610*4882a593Smuzhiyun 	}
611*4882a593Smuzhiyun 
612*4882a593Smuzhiyun 	kvm_mmu_free_memory_cache(&cache);
613*4882a593Smuzhiyun 	return ret;
614*4882a593Smuzhiyun }
615*4882a593Smuzhiyun 
616*4882a593Smuzhiyun /**
617*4882a593Smuzhiyun  * stage2_wp_range() - write protect stage2 memory region range
618*4882a593Smuzhiyun  * @mmu:        The KVM stage-2 MMU pointer
619*4882a593Smuzhiyun  * @addr:	Start address of range
620*4882a593Smuzhiyun  * @end:	End address of range
621*4882a593Smuzhiyun  */
stage2_wp_range(struct kvm_s2_mmu * mmu,phys_addr_t addr,phys_addr_t end)622*4882a593Smuzhiyun static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
623*4882a593Smuzhiyun {
624*4882a593Smuzhiyun 	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
625*4882a593Smuzhiyun 	stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_wrprotect);
626*4882a593Smuzhiyun }
627*4882a593Smuzhiyun 
628*4882a593Smuzhiyun /**
629*4882a593Smuzhiyun  * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
630*4882a593Smuzhiyun  * @kvm:	The KVM pointer
631*4882a593Smuzhiyun  * @slot:	The memory slot to write protect
632*4882a593Smuzhiyun  *
633*4882a593Smuzhiyun  * Called to start logging dirty pages after memory region
634*4882a593Smuzhiyun  * KVM_MEM_LOG_DIRTY_PAGES operation is called. After this function returns
635*4882a593Smuzhiyun  * all present PUD, PMD and PTEs are write protected in the memory region.
636*4882a593Smuzhiyun  * Afterwards read of dirty page log can be called.
637*4882a593Smuzhiyun  *
638*4882a593Smuzhiyun  * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
639*4882a593Smuzhiyun  * serializing operations for VM memory regions.
640*4882a593Smuzhiyun  */
kvm_mmu_wp_memory_region(struct kvm * kvm,int slot)641*4882a593Smuzhiyun void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
642*4882a593Smuzhiyun {
643*4882a593Smuzhiyun 	struct kvm_memslots *slots = kvm_memslots(kvm);
644*4882a593Smuzhiyun 	struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
645*4882a593Smuzhiyun 	phys_addr_t start, end;
646*4882a593Smuzhiyun 
647*4882a593Smuzhiyun 	if (WARN_ON_ONCE(!memslot))
648*4882a593Smuzhiyun 		return;
649*4882a593Smuzhiyun 
650*4882a593Smuzhiyun 	start = memslot->base_gfn << PAGE_SHIFT;
651*4882a593Smuzhiyun 	end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
652*4882a593Smuzhiyun 
653*4882a593Smuzhiyun 	spin_lock(&kvm->mmu_lock);
654*4882a593Smuzhiyun 	stage2_wp_range(&kvm->arch.mmu, start, end);
655*4882a593Smuzhiyun 	spin_unlock(&kvm->mmu_lock);
656*4882a593Smuzhiyun 	kvm_flush_remote_tlbs(kvm);
657*4882a593Smuzhiyun }
658*4882a593Smuzhiyun 
659*4882a593Smuzhiyun /**
660*4882a593Smuzhiyun  * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
661*4882a593Smuzhiyun  * @kvm:	The KVM pointer
662*4882a593Smuzhiyun  * @slot:	The memory slot associated with mask
663*4882a593Smuzhiyun  * @gfn_offset:	The gfn offset in memory slot
664*4882a593Smuzhiyun  * @mask:	The mask of dirty pages at offset 'gfn_offset' in this memory
665*4882a593Smuzhiyun  *		slot to be write protected
666*4882a593Smuzhiyun  *
667*4882a593Smuzhiyun  * Walks bits set in mask write protects the associated pte's. Caller must
668*4882a593Smuzhiyun  * acquire kvm_mmu_lock.
669*4882a593Smuzhiyun  */
kvm_mmu_write_protect_pt_masked(struct kvm * kvm,struct kvm_memory_slot * slot,gfn_t gfn_offset,unsigned long mask)670*4882a593Smuzhiyun static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
671*4882a593Smuzhiyun 		struct kvm_memory_slot *slot,
672*4882a593Smuzhiyun 		gfn_t gfn_offset, unsigned long mask)
673*4882a593Smuzhiyun {
674*4882a593Smuzhiyun 	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
675*4882a593Smuzhiyun 	phys_addr_t start = (base_gfn +  __ffs(mask)) << PAGE_SHIFT;
676*4882a593Smuzhiyun 	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
677*4882a593Smuzhiyun 
678*4882a593Smuzhiyun 	stage2_wp_range(&kvm->arch.mmu, start, end);
679*4882a593Smuzhiyun }
680*4882a593Smuzhiyun 
681*4882a593Smuzhiyun /*
682*4882a593Smuzhiyun  * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
683*4882a593Smuzhiyun  * dirty pages.
684*4882a593Smuzhiyun  *
685*4882a593Smuzhiyun  * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
686*4882a593Smuzhiyun  * enable dirty logging for them.
687*4882a593Smuzhiyun  */
kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm * kvm,struct kvm_memory_slot * slot,gfn_t gfn_offset,unsigned long mask)688*4882a593Smuzhiyun void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
689*4882a593Smuzhiyun 		struct kvm_memory_slot *slot,
690*4882a593Smuzhiyun 		gfn_t gfn_offset, unsigned long mask)
691*4882a593Smuzhiyun {
692*4882a593Smuzhiyun 	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
693*4882a593Smuzhiyun }
694*4882a593Smuzhiyun 
clean_dcache_guest_page(kvm_pfn_t pfn,unsigned long size)695*4882a593Smuzhiyun static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
696*4882a593Smuzhiyun {
697*4882a593Smuzhiyun 	__clean_dcache_guest_page(pfn, size);
698*4882a593Smuzhiyun }
699*4882a593Smuzhiyun 
invalidate_icache_guest_page(kvm_pfn_t pfn,unsigned long size)700*4882a593Smuzhiyun static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
701*4882a593Smuzhiyun {
702*4882a593Smuzhiyun 	__invalidate_icache_guest_page(pfn, size);
703*4882a593Smuzhiyun }
704*4882a593Smuzhiyun 
kvm_send_hwpoison_signal(unsigned long address,short lsb)705*4882a593Smuzhiyun static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
706*4882a593Smuzhiyun {
707*4882a593Smuzhiyun 	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
708*4882a593Smuzhiyun }
709*4882a593Smuzhiyun 
fault_supports_stage2_huge_mapping(struct kvm_memory_slot * memslot,unsigned long hva,unsigned long map_size)710*4882a593Smuzhiyun static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
711*4882a593Smuzhiyun 					       unsigned long hva,
712*4882a593Smuzhiyun 					       unsigned long map_size)
713*4882a593Smuzhiyun {
714*4882a593Smuzhiyun 	gpa_t gpa_start;
715*4882a593Smuzhiyun 	hva_t uaddr_start, uaddr_end;
716*4882a593Smuzhiyun 	size_t size;
717*4882a593Smuzhiyun 
718*4882a593Smuzhiyun 	/* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */
719*4882a593Smuzhiyun 	if (map_size == PAGE_SIZE)
720*4882a593Smuzhiyun 		return true;
721*4882a593Smuzhiyun 
722*4882a593Smuzhiyun 	size = memslot->npages * PAGE_SIZE;
723*4882a593Smuzhiyun 
724*4882a593Smuzhiyun 	gpa_start = memslot->base_gfn << PAGE_SHIFT;
725*4882a593Smuzhiyun 
726*4882a593Smuzhiyun 	uaddr_start = memslot->userspace_addr;
727*4882a593Smuzhiyun 	uaddr_end = uaddr_start + size;
728*4882a593Smuzhiyun 
729*4882a593Smuzhiyun 	/*
730*4882a593Smuzhiyun 	 * Pages belonging to memslots that don't have the same alignment
731*4882a593Smuzhiyun 	 * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2
732*4882a593Smuzhiyun 	 * PMD/PUD entries, because we'll end up mapping the wrong pages.
733*4882a593Smuzhiyun 	 *
734*4882a593Smuzhiyun 	 * Consider a layout like the following:
735*4882a593Smuzhiyun 	 *
736*4882a593Smuzhiyun 	 *    memslot->userspace_addr:
737*4882a593Smuzhiyun 	 *    +-----+--------------------+--------------------+---+
738*4882a593Smuzhiyun 	 *    |abcde|fgh  Stage-1 block  |    Stage-1 block tv|xyz|
739*4882a593Smuzhiyun 	 *    +-----+--------------------+--------------------+---+
740*4882a593Smuzhiyun 	 *
741*4882a593Smuzhiyun 	 *    memslot->base_gfn << PAGE_SHIFT:
742*4882a593Smuzhiyun 	 *      +---+--------------------+--------------------+-----+
743*4882a593Smuzhiyun 	 *      |abc|def  Stage-2 block  |    Stage-2 block   |tvxyz|
744*4882a593Smuzhiyun 	 *      +---+--------------------+--------------------+-----+
745*4882a593Smuzhiyun 	 *
746*4882a593Smuzhiyun 	 * If we create those stage-2 blocks, we'll end up with this incorrect
747*4882a593Smuzhiyun 	 * mapping:
748*4882a593Smuzhiyun 	 *   d -> f
749*4882a593Smuzhiyun 	 *   e -> g
750*4882a593Smuzhiyun 	 *   f -> h
751*4882a593Smuzhiyun 	 */
752*4882a593Smuzhiyun 	if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
753*4882a593Smuzhiyun 		return false;
754*4882a593Smuzhiyun 
755*4882a593Smuzhiyun 	/*
756*4882a593Smuzhiyun 	 * Next, let's make sure we're not trying to map anything not covered
757*4882a593Smuzhiyun 	 * by the memslot. This means we have to prohibit block size mappings
758*4882a593Smuzhiyun 	 * for the beginning and end of a non-block aligned and non-block sized
759*4882a593Smuzhiyun 	 * memory slot (illustrated by the head and tail parts of the
760*4882a593Smuzhiyun 	 * userspace view above containing pages 'abcde' and 'xyz',
761*4882a593Smuzhiyun 	 * respectively).
762*4882a593Smuzhiyun 	 *
763*4882a593Smuzhiyun 	 * Note that it doesn't matter if we do the check using the
764*4882a593Smuzhiyun 	 * userspace_addr or the base_gfn, as both are equally aligned (per
765*4882a593Smuzhiyun 	 * the check above) and equally sized.
766*4882a593Smuzhiyun 	 */
767*4882a593Smuzhiyun 	return (hva & ~(map_size - 1)) >= uaddr_start &&
768*4882a593Smuzhiyun 	       (hva & ~(map_size - 1)) + map_size <= uaddr_end;
769*4882a593Smuzhiyun }
770*4882a593Smuzhiyun 
771*4882a593Smuzhiyun /*
772*4882a593Smuzhiyun  * Check if the given hva is backed by a transparent huge page (THP) and
773*4882a593Smuzhiyun  * whether it can be mapped using block mapping in stage2. If so, adjust
774*4882a593Smuzhiyun  * the stage2 PFN and IPA accordingly. Only PMD_SIZE THPs are currently
775*4882a593Smuzhiyun  * supported. This will need to be updated to support other THP sizes.
776*4882a593Smuzhiyun  *
777*4882a593Smuzhiyun  * Returns the size of the mapping.
778*4882a593Smuzhiyun  */
779*4882a593Smuzhiyun static unsigned long
transparent_hugepage_adjust(struct kvm_memory_slot * memslot,unsigned long hva,kvm_pfn_t * pfnp,phys_addr_t * ipap)780*4882a593Smuzhiyun transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
781*4882a593Smuzhiyun 			    unsigned long hva, kvm_pfn_t *pfnp,
782*4882a593Smuzhiyun 			    phys_addr_t *ipap)
783*4882a593Smuzhiyun {
784*4882a593Smuzhiyun 	kvm_pfn_t pfn = *pfnp;
785*4882a593Smuzhiyun 
786*4882a593Smuzhiyun 	/*
787*4882a593Smuzhiyun 	 * Make sure the adjustment is done only for THP pages. Also make
788*4882a593Smuzhiyun 	 * sure that the HVA and IPA are sufficiently aligned and that the
789*4882a593Smuzhiyun 	 * block map is contained within the memslot.
790*4882a593Smuzhiyun 	 */
791*4882a593Smuzhiyun 	if (kvm_is_transparent_hugepage(pfn) &&
792*4882a593Smuzhiyun 	    fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
793*4882a593Smuzhiyun 		/*
794*4882a593Smuzhiyun 		 * The address we faulted on is backed by a transparent huge
795*4882a593Smuzhiyun 		 * page.  However, because we map the compound huge page and
796*4882a593Smuzhiyun 		 * not the individual tail page, we need to transfer the
797*4882a593Smuzhiyun 		 * refcount to the head page.  We have to be careful that the
798*4882a593Smuzhiyun 		 * THP doesn't start to split while we are adjusting the
799*4882a593Smuzhiyun 		 * refcounts.
800*4882a593Smuzhiyun 		 *
801*4882a593Smuzhiyun 		 * We are sure this doesn't happen, because mmu_notifier_retry
802*4882a593Smuzhiyun 		 * was successful and we are holding the mmu_lock, so if this
803*4882a593Smuzhiyun 		 * THP is trying to split, it will be blocked in the mmu
804*4882a593Smuzhiyun 		 * notifier before touching any of the pages, specifically
805*4882a593Smuzhiyun 		 * before being able to call __split_huge_page_refcount().
806*4882a593Smuzhiyun 		 *
807*4882a593Smuzhiyun 		 * We can therefore safely transfer the refcount from PG_tail
808*4882a593Smuzhiyun 		 * to PG_head and switch the pfn from a tail page to the head
809*4882a593Smuzhiyun 		 * page accordingly.
810*4882a593Smuzhiyun 		 */
811*4882a593Smuzhiyun 		*ipap &= PMD_MASK;
812*4882a593Smuzhiyun 		kvm_release_pfn_clean(pfn);
813*4882a593Smuzhiyun 		pfn &= ~(PTRS_PER_PMD - 1);
814*4882a593Smuzhiyun 		kvm_get_pfn(pfn);
815*4882a593Smuzhiyun 		*pfnp = pfn;
816*4882a593Smuzhiyun 
817*4882a593Smuzhiyun 		return PMD_SIZE;
818*4882a593Smuzhiyun 	}
819*4882a593Smuzhiyun 
820*4882a593Smuzhiyun 	/* Use page mapping if we cannot use block mapping. */
821*4882a593Smuzhiyun 	return PAGE_SIZE;
822*4882a593Smuzhiyun }
823*4882a593Smuzhiyun 
user_mem_abort(struct kvm_vcpu * vcpu,phys_addr_t fault_ipa,struct kvm_memory_slot * memslot,unsigned long hva,unsigned long fault_status)824*4882a593Smuzhiyun static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
825*4882a593Smuzhiyun 			  struct kvm_memory_slot *memslot, unsigned long hva,
826*4882a593Smuzhiyun 			  unsigned long fault_status)
827*4882a593Smuzhiyun {
828*4882a593Smuzhiyun 	int ret = 0;
829*4882a593Smuzhiyun 	bool write_fault, writable, force_pte = false;
830*4882a593Smuzhiyun 	bool exec_fault;
831*4882a593Smuzhiyun 	bool device = false;
832*4882a593Smuzhiyun 	unsigned long mmu_seq;
833*4882a593Smuzhiyun 	struct kvm *kvm = vcpu->kvm;
834*4882a593Smuzhiyun 	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
835*4882a593Smuzhiyun 	struct vm_area_struct *vma;
836*4882a593Smuzhiyun 	short vma_shift;
837*4882a593Smuzhiyun 	gfn_t gfn;
838*4882a593Smuzhiyun 	kvm_pfn_t pfn;
839*4882a593Smuzhiyun 	bool logging_active = memslot_is_logging(memslot);
840*4882a593Smuzhiyun 	unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
841*4882a593Smuzhiyun 	unsigned long vma_pagesize, fault_granule;
842*4882a593Smuzhiyun 	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
843*4882a593Smuzhiyun 	struct kvm_pgtable *pgt;
844*4882a593Smuzhiyun 
845*4882a593Smuzhiyun 	fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level);
846*4882a593Smuzhiyun 	write_fault = kvm_is_write_fault(vcpu);
847*4882a593Smuzhiyun 	exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
848*4882a593Smuzhiyun 	VM_BUG_ON(write_fault && exec_fault);
849*4882a593Smuzhiyun 
850*4882a593Smuzhiyun 	if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
851*4882a593Smuzhiyun 		kvm_err("Unexpected L2 read permission error\n");
852*4882a593Smuzhiyun 		return -EFAULT;
853*4882a593Smuzhiyun 	}
854*4882a593Smuzhiyun 
855*4882a593Smuzhiyun 	/* Let's check if we will get back a huge page backed by hugetlbfs */
856*4882a593Smuzhiyun 	mmap_read_lock(current->mm);
857*4882a593Smuzhiyun 	vma = find_vma_intersection(current->mm, hva, hva + 1);
858*4882a593Smuzhiyun 	if (unlikely(!vma)) {
859*4882a593Smuzhiyun 		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
860*4882a593Smuzhiyun 		mmap_read_unlock(current->mm);
861*4882a593Smuzhiyun 		return -EFAULT;
862*4882a593Smuzhiyun 	}
863*4882a593Smuzhiyun 
864*4882a593Smuzhiyun 	if (is_vm_hugetlb_page(vma))
865*4882a593Smuzhiyun 		vma_shift = huge_page_shift(hstate_vma(vma));
866*4882a593Smuzhiyun 	else
867*4882a593Smuzhiyun 		vma_shift = PAGE_SHIFT;
868*4882a593Smuzhiyun 
869*4882a593Smuzhiyun 	if (logging_active ||
870*4882a593Smuzhiyun 	    (vma->vm_flags & VM_PFNMAP)) {
871*4882a593Smuzhiyun 		force_pte = true;
872*4882a593Smuzhiyun 		vma_shift = PAGE_SHIFT;
873*4882a593Smuzhiyun 	}
874*4882a593Smuzhiyun 
875*4882a593Smuzhiyun 	switch (vma_shift) {
876*4882a593Smuzhiyun #ifndef __PAGETABLE_PMD_FOLDED
877*4882a593Smuzhiyun 	case PUD_SHIFT:
878*4882a593Smuzhiyun 		if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
879*4882a593Smuzhiyun 			break;
880*4882a593Smuzhiyun 		fallthrough;
881*4882a593Smuzhiyun #endif
882*4882a593Smuzhiyun 	case CONT_PMD_SHIFT:
883*4882a593Smuzhiyun 		vma_shift = PMD_SHIFT;
884*4882a593Smuzhiyun 		fallthrough;
885*4882a593Smuzhiyun 	case PMD_SHIFT:
886*4882a593Smuzhiyun 		if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
887*4882a593Smuzhiyun 			break;
888*4882a593Smuzhiyun 		fallthrough;
889*4882a593Smuzhiyun 	case CONT_PTE_SHIFT:
890*4882a593Smuzhiyun 		vma_shift = PAGE_SHIFT;
891*4882a593Smuzhiyun 		force_pte = true;
892*4882a593Smuzhiyun 		fallthrough;
893*4882a593Smuzhiyun 	case PAGE_SHIFT:
894*4882a593Smuzhiyun 		break;
895*4882a593Smuzhiyun 	default:
896*4882a593Smuzhiyun 		WARN_ONCE(1, "Unknown vma_shift %d", vma_shift);
897*4882a593Smuzhiyun 	}
898*4882a593Smuzhiyun 
899*4882a593Smuzhiyun 	vma_pagesize = 1UL << vma_shift;
900*4882a593Smuzhiyun 	if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
901*4882a593Smuzhiyun 		fault_ipa &= ~(vma_pagesize - 1);
902*4882a593Smuzhiyun 
903*4882a593Smuzhiyun 	gfn = fault_ipa >> PAGE_SHIFT;
904*4882a593Smuzhiyun 	mmap_read_unlock(current->mm);
905*4882a593Smuzhiyun 
906*4882a593Smuzhiyun 	/*
907*4882a593Smuzhiyun 	 * Permission faults just need to update the existing leaf entry,
908*4882a593Smuzhiyun 	 * and so normally don't require allocations from the memcache. The
909*4882a593Smuzhiyun 	 * only exception to this is when dirty logging is enabled at runtime
910*4882a593Smuzhiyun 	 * and a write fault needs to collapse a block entry into a table.
911*4882a593Smuzhiyun 	 */
912*4882a593Smuzhiyun 	if (fault_status != FSC_PERM || (logging_active && write_fault)) {
913*4882a593Smuzhiyun 		ret = kvm_mmu_topup_memory_cache(memcache,
914*4882a593Smuzhiyun 						 kvm_mmu_cache_min_pages(kvm));
915*4882a593Smuzhiyun 		if (ret)
916*4882a593Smuzhiyun 			return ret;
917*4882a593Smuzhiyun 	}
918*4882a593Smuzhiyun 
919*4882a593Smuzhiyun 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
920*4882a593Smuzhiyun 	/*
921*4882a593Smuzhiyun 	 * Ensure the read of mmu_notifier_seq happens before we call
922*4882a593Smuzhiyun 	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
923*4882a593Smuzhiyun 	 * the page we just got a reference to gets unmapped before we have a
924*4882a593Smuzhiyun 	 * chance to grab the mmu_lock, which ensure that if the page gets
925*4882a593Smuzhiyun 	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
926*4882a593Smuzhiyun 	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
927*4882a593Smuzhiyun 	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
928*4882a593Smuzhiyun 	 */
929*4882a593Smuzhiyun 	smp_rmb();
930*4882a593Smuzhiyun 
931*4882a593Smuzhiyun 	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
932*4882a593Smuzhiyun 	if (pfn == KVM_PFN_ERR_HWPOISON) {
933*4882a593Smuzhiyun 		kvm_send_hwpoison_signal(hva, vma_shift);
934*4882a593Smuzhiyun 		return 0;
935*4882a593Smuzhiyun 	}
936*4882a593Smuzhiyun 	if (is_error_noslot_pfn(pfn))
937*4882a593Smuzhiyun 		return -EFAULT;
938*4882a593Smuzhiyun 
939*4882a593Smuzhiyun 	if (kvm_is_device_pfn(pfn)) {
940*4882a593Smuzhiyun 		device = true;
941*4882a593Smuzhiyun 		force_pte = true;
942*4882a593Smuzhiyun 	} else if (logging_active && !write_fault) {
943*4882a593Smuzhiyun 		/*
944*4882a593Smuzhiyun 		 * Only actually map the page as writable if this was a write
945*4882a593Smuzhiyun 		 * fault.
946*4882a593Smuzhiyun 		 */
947*4882a593Smuzhiyun 		writable = false;
948*4882a593Smuzhiyun 	}
949*4882a593Smuzhiyun 
950*4882a593Smuzhiyun 	if (exec_fault && device)
951*4882a593Smuzhiyun 		return -ENOEXEC;
952*4882a593Smuzhiyun 
953*4882a593Smuzhiyun 	spin_lock(&kvm->mmu_lock);
954*4882a593Smuzhiyun 	pgt = vcpu->arch.hw_mmu->pgt;
955*4882a593Smuzhiyun 	if (mmu_notifier_retry(kvm, mmu_seq))
956*4882a593Smuzhiyun 		goto out_unlock;
957*4882a593Smuzhiyun 
958*4882a593Smuzhiyun 	/*
959*4882a593Smuzhiyun 	 * If we are not forced to use page mapping, check if we are
960*4882a593Smuzhiyun 	 * backed by a THP and thus use block mapping if possible.
961*4882a593Smuzhiyun 	 */
962*4882a593Smuzhiyun 	if (vma_pagesize == PAGE_SIZE && !force_pte)
963*4882a593Smuzhiyun 		vma_pagesize = transparent_hugepage_adjust(memslot, hva,
964*4882a593Smuzhiyun 							   &pfn, &fault_ipa);
965*4882a593Smuzhiyun 	if (writable)
966*4882a593Smuzhiyun 		prot |= KVM_PGTABLE_PROT_W;
967*4882a593Smuzhiyun 
968*4882a593Smuzhiyun 	if (fault_status != FSC_PERM && !device)
969*4882a593Smuzhiyun 		clean_dcache_guest_page(pfn, vma_pagesize);
970*4882a593Smuzhiyun 
971*4882a593Smuzhiyun 	if (exec_fault) {
972*4882a593Smuzhiyun 		prot |= KVM_PGTABLE_PROT_X;
973*4882a593Smuzhiyun 		invalidate_icache_guest_page(pfn, vma_pagesize);
974*4882a593Smuzhiyun 	}
975*4882a593Smuzhiyun 
976*4882a593Smuzhiyun 	if (device)
977*4882a593Smuzhiyun 		prot |= KVM_PGTABLE_PROT_DEVICE;
978*4882a593Smuzhiyun 	else if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
979*4882a593Smuzhiyun 		prot |= KVM_PGTABLE_PROT_X;
980*4882a593Smuzhiyun 
981*4882a593Smuzhiyun 	/*
982*4882a593Smuzhiyun 	 * Under the premise of getting a FSC_PERM fault, we just need to relax
983*4882a593Smuzhiyun 	 * permissions only if vma_pagesize equals fault_granule. Otherwise,
984*4882a593Smuzhiyun 	 * kvm_pgtable_stage2_map() should be called to change block size.
985*4882a593Smuzhiyun 	 */
986*4882a593Smuzhiyun 	if (fault_status == FSC_PERM && vma_pagesize == fault_granule) {
987*4882a593Smuzhiyun 		ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
988*4882a593Smuzhiyun 	} else {
989*4882a593Smuzhiyun 		ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
990*4882a593Smuzhiyun 					     __pfn_to_phys(pfn), prot,
991*4882a593Smuzhiyun 					     memcache);
992*4882a593Smuzhiyun 	}
993*4882a593Smuzhiyun 
994*4882a593Smuzhiyun 	/* Mark the page dirty only if the fault is handled successfully */
995*4882a593Smuzhiyun 	if (writable && !ret) {
996*4882a593Smuzhiyun 		kvm_set_pfn_dirty(pfn);
997*4882a593Smuzhiyun 		mark_page_dirty(kvm, gfn);
998*4882a593Smuzhiyun 	}
999*4882a593Smuzhiyun 
1000*4882a593Smuzhiyun out_unlock:
1001*4882a593Smuzhiyun 	spin_unlock(&kvm->mmu_lock);
1002*4882a593Smuzhiyun 	kvm_set_pfn_accessed(pfn);
1003*4882a593Smuzhiyun 	kvm_release_pfn_clean(pfn);
1004*4882a593Smuzhiyun 	return ret != -EAGAIN ? ret : 0;
1005*4882a593Smuzhiyun }
1006*4882a593Smuzhiyun 
1007*4882a593Smuzhiyun /* Resolve the access fault by making the page young again. */
handle_access_fault(struct kvm_vcpu * vcpu,phys_addr_t fault_ipa)1008*4882a593Smuzhiyun static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
1009*4882a593Smuzhiyun {
1010*4882a593Smuzhiyun 	pte_t pte;
1011*4882a593Smuzhiyun 	kvm_pte_t kpte;
1012*4882a593Smuzhiyun 	struct kvm_s2_mmu *mmu;
1013*4882a593Smuzhiyun 
1014*4882a593Smuzhiyun 	trace_kvm_access_fault(fault_ipa);
1015*4882a593Smuzhiyun 
1016*4882a593Smuzhiyun 	spin_lock(&vcpu->kvm->mmu_lock);
1017*4882a593Smuzhiyun 	mmu = vcpu->arch.hw_mmu;
1018*4882a593Smuzhiyun 	kpte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa);
1019*4882a593Smuzhiyun 	spin_unlock(&vcpu->kvm->mmu_lock);
1020*4882a593Smuzhiyun 
1021*4882a593Smuzhiyun 	pte = __pte(kpte);
1022*4882a593Smuzhiyun 	if (pte_valid(pte))
1023*4882a593Smuzhiyun 		kvm_set_pfn_accessed(pte_pfn(pte));
1024*4882a593Smuzhiyun }
1025*4882a593Smuzhiyun 
1026*4882a593Smuzhiyun /**
1027*4882a593Smuzhiyun  * kvm_handle_guest_abort - handles all 2nd stage aborts
1028*4882a593Smuzhiyun  * @vcpu:	the VCPU pointer
1029*4882a593Smuzhiyun  *
1030*4882a593Smuzhiyun  * Any abort that gets to the host is almost guaranteed to be caused by a
1031*4882a593Smuzhiyun  * missing second stage translation table entry, which can mean that either the
1032*4882a593Smuzhiyun  * guest simply needs more memory and we must allocate an appropriate page or it
1033*4882a593Smuzhiyun  * can mean that the guest tried to access I/O memory, which is emulated by user
1034*4882a593Smuzhiyun  * space. The distinction is based on the IPA causing the fault and whether this
1035*4882a593Smuzhiyun  * memory region has been registered as standard RAM by user space.
1036*4882a593Smuzhiyun  */
kvm_handle_guest_abort(struct kvm_vcpu * vcpu)1037*4882a593Smuzhiyun int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
1038*4882a593Smuzhiyun {
1039*4882a593Smuzhiyun 	unsigned long fault_status;
1040*4882a593Smuzhiyun 	phys_addr_t fault_ipa;
1041*4882a593Smuzhiyun 	struct kvm_memory_slot *memslot;
1042*4882a593Smuzhiyun 	unsigned long hva;
1043*4882a593Smuzhiyun 	bool is_iabt, write_fault, writable;
1044*4882a593Smuzhiyun 	gfn_t gfn;
1045*4882a593Smuzhiyun 	int ret, idx;
1046*4882a593Smuzhiyun 
1047*4882a593Smuzhiyun 	fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
1048*4882a593Smuzhiyun 
1049*4882a593Smuzhiyun 	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
1050*4882a593Smuzhiyun 	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
1051*4882a593Smuzhiyun 
1052*4882a593Smuzhiyun 	/* Synchronous External Abort? */
1053*4882a593Smuzhiyun 	if (kvm_vcpu_abt_issea(vcpu)) {
1054*4882a593Smuzhiyun 		/*
1055*4882a593Smuzhiyun 		 * For RAS the host kernel may handle this abort.
1056*4882a593Smuzhiyun 		 * There is no need to pass the error into the guest.
1057*4882a593Smuzhiyun 		 */
1058*4882a593Smuzhiyun 		if (kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu)))
1059*4882a593Smuzhiyun 			kvm_inject_vabt(vcpu);
1060*4882a593Smuzhiyun 
1061*4882a593Smuzhiyun 		return 1;
1062*4882a593Smuzhiyun 	}
1063*4882a593Smuzhiyun 
1064*4882a593Smuzhiyun 	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
1065*4882a593Smuzhiyun 			      kvm_vcpu_get_hfar(vcpu), fault_ipa);
1066*4882a593Smuzhiyun 
1067*4882a593Smuzhiyun 	/* Check the stage-2 fault is trans. fault or write fault */
1068*4882a593Smuzhiyun 	if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
1069*4882a593Smuzhiyun 	    fault_status != FSC_ACCESS) {
1070*4882a593Smuzhiyun 		kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
1071*4882a593Smuzhiyun 			kvm_vcpu_trap_get_class(vcpu),
1072*4882a593Smuzhiyun 			(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
1073*4882a593Smuzhiyun 			(unsigned long)kvm_vcpu_get_esr(vcpu));
1074*4882a593Smuzhiyun 		return -EFAULT;
1075*4882a593Smuzhiyun 	}
1076*4882a593Smuzhiyun 
1077*4882a593Smuzhiyun 	idx = srcu_read_lock(&vcpu->kvm->srcu);
1078*4882a593Smuzhiyun 
1079*4882a593Smuzhiyun 	gfn = fault_ipa >> PAGE_SHIFT;
1080*4882a593Smuzhiyun 	memslot = gfn_to_memslot(vcpu->kvm, gfn);
1081*4882a593Smuzhiyun 	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
1082*4882a593Smuzhiyun 	write_fault = kvm_is_write_fault(vcpu);
1083*4882a593Smuzhiyun 	if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
1084*4882a593Smuzhiyun 		/*
1085*4882a593Smuzhiyun 		 * The guest has put either its instructions or its page-tables
1086*4882a593Smuzhiyun 		 * somewhere it shouldn't have. Userspace won't be able to do
1087*4882a593Smuzhiyun 		 * anything about this (there's no syndrome for a start), so
1088*4882a593Smuzhiyun 		 * re-inject the abort back into the guest.
1089*4882a593Smuzhiyun 		 */
1090*4882a593Smuzhiyun 		if (is_iabt) {
1091*4882a593Smuzhiyun 			ret = -ENOEXEC;
1092*4882a593Smuzhiyun 			goto out;
1093*4882a593Smuzhiyun 		}
1094*4882a593Smuzhiyun 
1095*4882a593Smuzhiyun 		if (kvm_vcpu_abt_iss1tw(vcpu)) {
1096*4882a593Smuzhiyun 			kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
1097*4882a593Smuzhiyun 			ret = 1;
1098*4882a593Smuzhiyun 			goto out_unlock;
1099*4882a593Smuzhiyun 		}
1100*4882a593Smuzhiyun 
1101*4882a593Smuzhiyun 		/*
1102*4882a593Smuzhiyun 		 * Check for a cache maintenance operation. Since we
1103*4882a593Smuzhiyun 		 * ended-up here, we know it is outside of any memory
1104*4882a593Smuzhiyun 		 * slot. But we can't find out if that is for a device,
1105*4882a593Smuzhiyun 		 * or if the guest is just being stupid. The only thing
1106*4882a593Smuzhiyun 		 * we know for sure is that this range cannot be cached.
1107*4882a593Smuzhiyun 		 *
1108*4882a593Smuzhiyun 		 * So let's assume that the guest is just being
1109*4882a593Smuzhiyun 		 * cautious, and skip the instruction.
1110*4882a593Smuzhiyun 		 */
1111*4882a593Smuzhiyun 		if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) {
1112*4882a593Smuzhiyun 			kvm_incr_pc(vcpu);
1113*4882a593Smuzhiyun 			ret = 1;
1114*4882a593Smuzhiyun 			goto out_unlock;
1115*4882a593Smuzhiyun 		}
1116*4882a593Smuzhiyun 
1117*4882a593Smuzhiyun 		/*
1118*4882a593Smuzhiyun 		 * The IPA is reported as [MAX:12], so we need to
1119*4882a593Smuzhiyun 		 * complement it with the bottom 12 bits from the
1120*4882a593Smuzhiyun 		 * faulting VA. This is always 12 bits, irrespective
1121*4882a593Smuzhiyun 		 * of the page size.
1122*4882a593Smuzhiyun 		 */
1123*4882a593Smuzhiyun 		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
1124*4882a593Smuzhiyun 		ret = io_mem_abort(vcpu, fault_ipa);
1125*4882a593Smuzhiyun 		goto out_unlock;
1126*4882a593Smuzhiyun 	}
1127*4882a593Smuzhiyun 
1128*4882a593Smuzhiyun 	/* Userspace should not be able to register out-of-bounds IPAs */
1129*4882a593Smuzhiyun 	VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun 	if (fault_status == FSC_ACCESS) {
1132*4882a593Smuzhiyun 		handle_access_fault(vcpu, fault_ipa);
1133*4882a593Smuzhiyun 		ret = 1;
1134*4882a593Smuzhiyun 		goto out_unlock;
1135*4882a593Smuzhiyun 	}
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun 	ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
1138*4882a593Smuzhiyun 	if (ret == 0)
1139*4882a593Smuzhiyun 		ret = 1;
1140*4882a593Smuzhiyun out:
1141*4882a593Smuzhiyun 	if (ret == -ENOEXEC) {
1142*4882a593Smuzhiyun 		kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
1143*4882a593Smuzhiyun 		ret = 1;
1144*4882a593Smuzhiyun 	}
1145*4882a593Smuzhiyun out_unlock:
1146*4882a593Smuzhiyun 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
1147*4882a593Smuzhiyun 	return ret;
1148*4882a593Smuzhiyun }
1149*4882a593Smuzhiyun 
handle_hva_to_gpa(struct kvm * kvm,unsigned long start,unsigned long end,int (* handler)(struct kvm * kvm,gpa_t gpa,u64 size,void * data),void * data)1150*4882a593Smuzhiyun static int handle_hva_to_gpa(struct kvm *kvm,
1151*4882a593Smuzhiyun 			     unsigned long start,
1152*4882a593Smuzhiyun 			     unsigned long end,
1153*4882a593Smuzhiyun 			     int (*handler)(struct kvm *kvm,
1154*4882a593Smuzhiyun 					    gpa_t gpa, u64 size,
1155*4882a593Smuzhiyun 					    void *data),
1156*4882a593Smuzhiyun 			     void *data)
1157*4882a593Smuzhiyun {
1158*4882a593Smuzhiyun 	struct kvm_memslots *slots;
1159*4882a593Smuzhiyun 	struct kvm_memory_slot *memslot;
1160*4882a593Smuzhiyun 	int ret = 0;
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun 	slots = kvm_memslots(kvm);
1163*4882a593Smuzhiyun 
1164*4882a593Smuzhiyun 	/* we only care about the pages that the guest sees */
1165*4882a593Smuzhiyun 	kvm_for_each_memslot(memslot, slots) {
1166*4882a593Smuzhiyun 		unsigned long hva_start, hva_end;
1167*4882a593Smuzhiyun 		gfn_t gpa;
1168*4882a593Smuzhiyun 
1169*4882a593Smuzhiyun 		hva_start = max(start, memslot->userspace_addr);
1170*4882a593Smuzhiyun 		hva_end = min(end, memslot->userspace_addr +
1171*4882a593Smuzhiyun 					(memslot->npages << PAGE_SHIFT));
1172*4882a593Smuzhiyun 		if (hva_start >= hva_end)
1173*4882a593Smuzhiyun 			continue;
1174*4882a593Smuzhiyun 
1175*4882a593Smuzhiyun 		gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
1176*4882a593Smuzhiyun 		ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
1177*4882a593Smuzhiyun 	}
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun 	return ret;
1180*4882a593Smuzhiyun }
1181*4882a593Smuzhiyun 
kvm_unmap_hva_handler(struct kvm * kvm,gpa_t gpa,u64 size,void * data)1182*4882a593Smuzhiyun static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
1183*4882a593Smuzhiyun {
1184*4882a593Smuzhiyun 	unsigned flags = *(unsigned *)data;
1185*4882a593Smuzhiyun 	bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;
1186*4882a593Smuzhiyun 
1187*4882a593Smuzhiyun 	__unmap_stage2_range(&kvm->arch.mmu, gpa, size, may_block);
1188*4882a593Smuzhiyun 	return 0;
1189*4882a593Smuzhiyun }
1190*4882a593Smuzhiyun 
kvm_unmap_hva_range(struct kvm * kvm,unsigned long start,unsigned long end,unsigned flags)1191*4882a593Smuzhiyun int kvm_unmap_hva_range(struct kvm *kvm,
1192*4882a593Smuzhiyun 			unsigned long start, unsigned long end, unsigned flags)
1193*4882a593Smuzhiyun {
1194*4882a593Smuzhiyun 	if (!kvm->arch.mmu.pgt)
1195*4882a593Smuzhiyun 		return 0;
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun 	trace_kvm_unmap_hva_range(start, end);
1198*4882a593Smuzhiyun 	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
1199*4882a593Smuzhiyun 	return 0;
1200*4882a593Smuzhiyun }
1201*4882a593Smuzhiyun 
kvm_set_spte_handler(struct kvm * kvm,gpa_t gpa,u64 size,void * data)1202*4882a593Smuzhiyun static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
1203*4882a593Smuzhiyun {
1204*4882a593Smuzhiyun 	kvm_pfn_t *pfn = (kvm_pfn_t *)data;
1205*4882a593Smuzhiyun 
1206*4882a593Smuzhiyun 	WARN_ON(size != PAGE_SIZE);
1207*4882a593Smuzhiyun 
1208*4882a593Smuzhiyun 	/*
1209*4882a593Smuzhiyun 	 * The MMU notifiers will have unmapped a huge PMD before calling
1210*4882a593Smuzhiyun 	 * ->change_pte() (which in turn calls kvm_set_spte_hva()) and
1211*4882a593Smuzhiyun 	 * therefore we never need to clear out a huge PMD through this
1212*4882a593Smuzhiyun 	 * calling path and a memcache is not required.
1213*4882a593Smuzhiyun 	 */
1214*4882a593Smuzhiyun 	kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, gpa, PAGE_SIZE,
1215*4882a593Smuzhiyun 			       __pfn_to_phys(*pfn), KVM_PGTABLE_PROT_R, NULL);
1216*4882a593Smuzhiyun 	return 0;
1217*4882a593Smuzhiyun }
1218*4882a593Smuzhiyun 
kvm_set_spte_hva(struct kvm * kvm,unsigned long hva,pte_t pte)1219*4882a593Smuzhiyun int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
1220*4882a593Smuzhiyun {
1221*4882a593Smuzhiyun 	unsigned long end = hva + PAGE_SIZE;
1222*4882a593Smuzhiyun 	kvm_pfn_t pfn = pte_pfn(pte);
1223*4882a593Smuzhiyun 
1224*4882a593Smuzhiyun 	if (!kvm->arch.mmu.pgt)
1225*4882a593Smuzhiyun 		return 0;
1226*4882a593Smuzhiyun 
1227*4882a593Smuzhiyun 	trace_kvm_set_spte_hva(hva);
1228*4882a593Smuzhiyun 
1229*4882a593Smuzhiyun 	/*
1230*4882a593Smuzhiyun 	 * We've moved a page around, probably through CoW, so let's treat it
1231*4882a593Smuzhiyun 	 * just like a translation fault and clean the cache to the PoC.
1232*4882a593Smuzhiyun 	 */
1233*4882a593Smuzhiyun 	clean_dcache_guest_page(pfn, PAGE_SIZE);
1234*4882a593Smuzhiyun 	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pfn);
1235*4882a593Smuzhiyun 	return 0;
1236*4882a593Smuzhiyun }
1237*4882a593Smuzhiyun 
kvm_age_hva_handler(struct kvm * kvm,gpa_t gpa,u64 size,void * data)1238*4882a593Smuzhiyun static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
1239*4882a593Smuzhiyun {
1240*4882a593Smuzhiyun 	pte_t pte;
1241*4882a593Smuzhiyun 	kvm_pte_t kpte;
1242*4882a593Smuzhiyun 
1243*4882a593Smuzhiyun 	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
1244*4882a593Smuzhiyun 	kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt, gpa);
1245*4882a593Smuzhiyun 	pte = __pte(kpte);
1246*4882a593Smuzhiyun 	return pte_valid(pte) && pte_young(pte);
1247*4882a593Smuzhiyun }
1248*4882a593Smuzhiyun 
kvm_test_age_hva_handler(struct kvm * kvm,gpa_t gpa,u64 size,void * data)1249*4882a593Smuzhiyun static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
1250*4882a593Smuzhiyun {
1251*4882a593Smuzhiyun 	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
1252*4882a593Smuzhiyun 	return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt, gpa);
1253*4882a593Smuzhiyun }
1254*4882a593Smuzhiyun 
kvm_age_hva(struct kvm * kvm,unsigned long start,unsigned long end)1255*4882a593Smuzhiyun int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
1256*4882a593Smuzhiyun {
1257*4882a593Smuzhiyun 	if (!kvm->arch.mmu.pgt)
1258*4882a593Smuzhiyun 		return 0;
1259*4882a593Smuzhiyun 	trace_kvm_age_hva(start, end);
1260*4882a593Smuzhiyun 	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
1261*4882a593Smuzhiyun }
1262*4882a593Smuzhiyun 
kvm_test_age_hva(struct kvm * kvm,unsigned long hva)1263*4882a593Smuzhiyun int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
1264*4882a593Smuzhiyun {
1265*4882a593Smuzhiyun 	if (!kvm->arch.mmu.pgt)
1266*4882a593Smuzhiyun 		return 0;
1267*4882a593Smuzhiyun 	trace_kvm_test_age_hva(hva);
1268*4882a593Smuzhiyun 	return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
1269*4882a593Smuzhiyun 				 kvm_test_age_hva_handler, NULL);
1270*4882a593Smuzhiyun }
1271*4882a593Smuzhiyun 
kvm_mmu_get_httbr(void)1272*4882a593Smuzhiyun phys_addr_t kvm_mmu_get_httbr(void)
1273*4882a593Smuzhiyun {
1274*4882a593Smuzhiyun 	return __pa(hyp_pgtable->pgd);
1275*4882a593Smuzhiyun }
1276*4882a593Smuzhiyun 
kvm_get_idmap_vector(void)1277*4882a593Smuzhiyun phys_addr_t kvm_get_idmap_vector(void)
1278*4882a593Smuzhiyun {
1279*4882a593Smuzhiyun 	return hyp_idmap_vector;
1280*4882a593Smuzhiyun }
1281*4882a593Smuzhiyun 
kvm_map_idmap_text(void)1282*4882a593Smuzhiyun static int kvm_map_idmap_text(void)
1283*4882a593Smuzhiyun {
1284*4882a593Smuzhiyun 	unsigned long size = hyp_idmap_end - hyp_idmap_start;
1285*4882a593Smuzhiyun 	int err = __create_hyp_mappings(hyp_idmap_start, size, hyp_idmap_start,
1286*4882a593Smuzhiyun 					PAGE_HYP_EXEC);
1287*4882a593Smuzhiyun 	if (err)
1288*4882a593Smuzhiyun 		kvm_err("Failed to idmap %lx-%lx\n",
1289*4882a593Smuzhiyun 			hyp_idmap_start, hyp_idmap_end);
1290*4882a593Smuzhiyun 
1291*4882a593Smuzhiyun 	return err;
1292*4882a593Smuzhiyun }
1293*4882a593Smuzhiyun 
kvm_hyp_zalloc_page(void * arg)1294*4882a593Smuzhiyun static void *kvm_hyp_zalloc_page(void *arg)
1295*4882a593Smuzhiyun {
1296*4882a593Smuzhiyun 	return (void *)get_zeroed_page(GFP_KERNEL);
1297*4882a593Smuzhiyun }
1298*4882a593Smuzhiyun 
1299*4882a593Smuzhiyun static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = {
1300*4882a593Smuzhiyun 	.zalloc_page		= kvm_hyp_zalloc_page,
1301*4882a593Smuzhiyun 	.get_page		= kvm_host_get_page,
1302*4882a593Smuzhiyun 	.put_page		= kvm_host_put_page,
1303*4882a593Smuzhiyun 	.phys_to_virt		= kvm_host_va,
1304*4882a593Smuzhiyun 	.virt_to_phys		= kvm_host_pa,
1305*4882a593Smuzhiyun };
1306*4882a593Smuzhiyun 
kvm_mmu_init(u32 * hyp_va_bits)1307*4882a593Smuzhiyun int kvm_mmu_init(u32 *hyp_va_bits)
1308*4882a593Smuzhiyun {
1309*4882a593Smuzhiyun 	int err;
1310*4882a593Smuzhiyun 
1311*4882a593Smuzhiyun 	hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start);
1312*4882a593Smuzhiyun 	hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
1313*4882a593Smuzhiyun 	hyp_idmap_end = __pa_symbol(__hyp_idmap_text_end);
1314*4882a593Smuzhiyun 	hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE);
1315*4882a593Smuzhiyun 	hyp_idmap_vector = __pa_symbol(__kvm_hyp_init);
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun 	/*
1318*4882a593Smuzhiyun 	 * We rely on the linker script to ensure at build time that the HYP
1319*4882a593Smuzhiyun 	 * init code does not cross a page boundary.
1320*4882a593Smuzhiyun 	 */
1321*4882a593Smuzhiyun 	BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
1322*4882a593Smuzhiyun 
1323*4882a593Smuzhiyun 	*hyp_va_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET);
1324*4882a593Smuzhiyun 	kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits);
1325*4882a593Smuzhiyun 	kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
1326*4882a593Smuzhiyun 	kvm_debug("HYP VA range: %lx:%lx\n",
1327*4882a593Smuzhiyun 		  kern_hyp_va(PAGE_OFFSET),
1328*4882a593Smuzhiyun 		  kern_hyp_va((unsigned long)high_memory - 1));
1329*4882a593Smuzhiyun 
1330*4882a593Smuzhiyun 	if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
1331*4882a593Smuzhiyun 	    hyp_idmap_start <  kern_hyp_va((unsigned long)high_memory - 1) &&
1332*4882a593Smuzhiyun 	    hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
1333*4882a593Smuzhiyun 		/*
1334*4882a593Smuzhiyun 		 * The idmap page is intersecting with the VA space,
1335*4882a593Smuzhiyun 		 * it is not safe to continue further.
1336*4882a593Smuzhiyun 		 */
1337*4882a593Smuzhiyun 		kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
1338*4882a593Smuzhiyun 		err = -EINVAL;
1339*4882a593Smuzhiyun 		goto out;
1340*4882a593Smuzhiyun 	}
1341*4882a593Smuzhiyun 
1342*4882a593Smuzhiyun 	hyp_pgtable = kzalloc(sizeof(*hyp_pgtable), GFP_KERNEL);
1343*4882a593Smuzhiyun 	if (!hyp_pgtable) {
1344*4882a593Smuzhiyun 		kvm_err("Hyp mode page-table not allocated\n");
1345*4882a593Smuzhiyun 		err = -ENOMEM;
1346*4882a593Smuzhiyun 		goto out;
1347*4882a593Smuzhiyun 	}
1348*4882a593Smuzhiyun 
1349*4882a593Smuzhiyun 	err = kvm_pgtable_hyp_init(hyp_pgtable, *hyp_va_bits, &kvm_hyp_mm_ops);
1350*4882a593Smuzhiyun 	if (err)
1351*4882a593Smuzhiyun 		goto out_free_pgtable;
1352*4882a593Smuzhiyun 
1353*4882a593Smuzhiyun 	err = kvm_map_idmap_text();
1354*4882a593Smuzhiyun 	if (err)
1355*4882a593Smuzhiyun 		goto out_destroy_pgtable;
1356*4882a593Smuzhiyun 
1357*4882a593Smuzhiyun 	io_map_base = hyp_idmap_start;
1358*4882a593Smuzhiyun 	return 0;
1359*4882a593Smuzhiyun 
1360*4882a593Smuzhiyun out_destroy_pgtable:
1361*4882a593Smuzhiyun 	kvm_pgtable_hyp_destroy(hyp_pgtable);
1362*4882a593Smuzhiyun out_free_pgtable:
1363*4882a593Smuzhiyun 	kfree(hyp_pgtable);
1364*4882a593Smuzhiyun 	hyp_pgtable = NULL;
1365*4882a593Smuzhiyun out:
1366*4882a593Smuzhiyun 	return err;
1367*4882a593Smuzhiyun }
1368*4882a593Smuzhiyun 
kvm_arch_commit_memory_region(struct kvm * kvm,const struct kvm_userspace_memory_region * mem,struct kvm_memory_slot * old,const struct kvm_memory_slot * new,enum kvm_mr_change change)1369*4882a593Smuzhiyun void kvm_arch_commit_memory_region(struct kvm *kvm,
1370*4882a593Smuzhiyun 				   const struct kvm_userspace_memory_region *mem,
1371*4882a593Smuzhiyun 				   struct kvm_memory_slot *old,
1372*4882a593Smuzhiyun 				   const struct kvm_memory_slot *new,
1373*4882a593Smuzhiyun 				   enum kvm_mr_change change)
1374*4882a593Smuzhiyun {
1375*4882a593Smuzhiyun 	/*
1376*4882a593Smuzhiyun 	 * At this point memslot has been committed and there is an
1377*4882a593Smuzhiyun 	 * allocated dirty_bitmap[], dirty pages will be tracked while the
1378*4882a593Smuzhiyun 	 * memory slot is write protected.
1379*4882a593Smuzhiyun 	 */
1380*4882a593Smuzhiyun 	if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1381*4882a593Smuzhiyun 		/*
1382*4882a593Smuzhiyun 		 * If we're with initial-all-set, we don't need to write
1383*4882a593Smuzhiyun 		 * protect any pages because they're all reported as dirty.
1384*4882a593Smuzhiyun 		 * Huge pages and normal pages will be write protect gradually.
1385*4882a593Smuzhiyun 		 */
1386*4882a593Smuzhiyun 		if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) {
1387*4882a593Smuzhiyun 			kvm_mmu_wp_memory_region(kvm, mem->slot);
1388*4882a593Smuzhiyun 		}
1389*4882a593Smuzhiyun 	}
1390*4882a593Smuzhiyun }
1391*4882a593Smuzhiyun 
kvm_arch_prepare_memory_region(struct kvm * kvm,struct kvm_memory_slot * memslot,const struct kvm_userspace_memory_region * mem,enum kvm_mr_change change)1392*4882a593Smuzhiyun int kvm_arch_prepare_memory_region(struct kvm *kvm,
1393*4882a593Smuzhiyun 				   struct kvm_memory_slot *memslot,
1394*4882a593Smuzhiyun 				   const struct kvm_userspace_memory_region *mem,
1395*4882a593Smuzhiyun 				   enum kvm_mr_change change)
1396*4882a593Smuzhiyun {
1397*4882a593Smuzhiyun 	hva_t hva = mem->userspace_addr;
1398*4882a593Smuzhiyun 	hva_t reg_end = hva + mem->memory_size;
1399*4882a593Smuzhiyun 	bool writable = !(mem->flags & KVM_MEM_READONLY);
1400*4882a593Smuzhiyun 	int ret = 0;
1401*4882a593Smuzhiyun 
1402*4882a593Smuzhiyun 	if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
1403*4882a593Smuzhiyun 			change != KVM_MR_FLAGS_ONLY)
1404*4882a593Smuzhiyun 		return 0;
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 	/*
1407*4882a593Smuzhiyun 	 * Prevent userspace from creating a memory region outside of the IPA
1408*4882a593Smuzhiyun 	 * space addressable by the KVM guest IPA space.
1409*4882a593Smuzhiyun 	 */
1410*4882a593Smuzhiyun 	if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT))
1411*4882a593Smuzhiyun 		return -EFAULT;
1412*4882a593Smuzhiyun 
1413*4882a593Smuzhiyun 	mmap_read_lock(current->mm);
1414*4882a593Smuzhiyun 	/*
1415*4882a593Smuzhiyun 	 * A memory region could potentially cover multiple VMAs, and any holes
1416*4882a593Smuzhiyun 	 * between them, so iterate over all of them to find out if we can map
1417*4882a593Smuzhiyun 	 * any of them right now.
1418*4882a593Smuzhiyun 	 *
1419*4882a593Smuzhiyun 	 *     +--------------------------------------------+
1420*4882a593Smuzhiyun 	 * +---------------+----------------+   +----------------+
1421*4882a593Smuzhiyun 	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
1422*4882a593Smuzhiyun 	 * +---------------+----------------+   +----------------+
1423*4882a593Smuzhiyun 	 *     |               memory region                |
1424*4882a593Smuzhiyun 	 *     +--------------------------------------------+
1425*4882a593Smuzhiyun 	 */
1426*4882a593Smuzhiyun 	do {
1427*4882a593Smuzhiyun 		struct vm_area_struct *vma = find_vma(current->mm, hva);
1428*4882a593Smuzhiyun 		hva_t vm_start, vm_end;
1429*4882a593Smuzhiyun 
1430*4882a593Smuzhiyun 		if (!vma || vma->vm_start >= reg_end)
1431*4882a593Smuzhiyun 			break;
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun 		/*
1434*4882a593Smuzhiyun 		 * Take the intersection of this VMA with the memory region
1435*4882a593Smuzhiyun 		 */
1436*4882a593Smuzhiyun 		vm_start = max(hva, vma->vm_start);
1437*4882a593Smuzhiyun 		vm_end = min(reg_end, vma->vm_end);
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun 		if (vma->vm_flags & VM_PFNMAP) {
1440*4882a593Smuzhiyun 			gpa_t gpa = mem->guest_phys_addr +
1441*4882a593Smuzhiyun 				    (vm_start - mem->userspace_addr);
1442*4882a593Smuzhiyun 			phys_addr_t pa;
1443*4882a593Smuzhiyun 
1444*4882a593Smuzhiyun 			pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
1445*4882a593Smuzhiyun 			pa += vm_start - vma->vm_start;
1446*4882a593Smuzhiyun 
1447*4882a593Smuzhiyun 			/* IO region dirty page logging not allowed */
1448*4882a593Smuzhiyun 			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1449*4882a593Smuzhiyun 				ret = -EINVAL;
1450*4882a593Smuzhiyun 				goto out;
1451*4882a593Smuzhiyun 			}
1452*4882a593Smuzhiyun 
1453*4882a593Smuzhiyun 			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
1454*4882a593Smuzhiyun 						    vm_end - vm_start,
1455*4882a593Smuzhiyun 						    writable);
1456*4882a593Smuzhiyun 			if (ret)
1457*4882a593Smuzhiyun 				break;
1458*4882a593Smuzhiyun 		}
1459*4882a593Smuzhiyun 		hva = vm_end;
1460*4882a593Smuzhiyun 	} while (hva < reg_end);
1461*4882a593Smuzhiyun 
1462*4882a593Smuzhiyun 	if (change == KVM_MR_FLAGS_ONLY)
1463*4882a593Smuzhiyun 		goto out;
1464*4882a593Smuzhiyun 
1465*4882a593Smuzhiyun 	spin_lock(&kvm->mmu_lock);
1466*4882a593Smuzhiyun 	if (ret)
1467*4882a593Smuzhiyun 		unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr, mem->memory_size);
1468*4882a593Smuzhiyun 	else if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
1469*4882a593Smuzhiyun 		stage2_flush_memslot(kvm, memslot);
1470*4882a593Smuzhiyun 	spin_unlock(&kvm->mmu_lock);
1471*4882a593Smuzhiyun out:
1472*4882a593Smuzhiyun 	mmap_read_unlock(current->mm);
1473*4882a593Smuzhiyun 	return ret;
1474*4882a593Smuzhiyun }
1475*4882a593Smuzhiyun 
kvm_arch_free_memslot(struct kvm * kvm,struct kvm_memory_slot * slot)1476*4882a593Smuzhiyun void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
1477*4882a593Smuzhiyun {
1478*4882a593Smuzhiyun }
1479*4882a593Smuzhiyun 
kvm_arch_memslots_updated(struct kvm * kvm,u64 gen)1480*4882a593Smuzhiyun void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
1481*4882a593Smuzhiyun {
1482*4882a593Smuzhiyun }
1483*4882a593Smuzhiyun 
kvm_arch_flush_shadow_all(struct kvm * kvm)1484*4882a593Smuzhiyun void kvm_arch_flush_shadow_all(struct kvm *kvm)
1485*4882a593Smuzhiyun {
1486*4882a593Smuzhiyun 	kvm_free_stage2_pgd(&kvm->arch.mmu);
1487*4882a593Smuzhiyun }
1488*4882a593Smuzhiyun 
kvm_arch_flush_shadow_memslot(struct kvm * kvm,struct kvm_memory_slot * slot)1489*4882a593Smuzhiyun void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
1490*4882a593Smuzhiyun 				   struct kvm_memory_slot *slot)
1491*4882a593Smuzhiyun {
1492*4882a593Smuzhiyun 	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
1493*4882a593Smuzhiyun 	phys_addr_t size = slot->npages << PAGE_SHIFT;
1494*4882a593Smuzhiyun 
1495*4882a593Smuzhiyun 	spin_lock(&kvm->mmu_lock);
1496*4882a593Smuzhiyun 	unmap_stage2_range(&kvm->arch.mmu, gpa, size);
1497*4882a593Smuzhiyun 	spin_unlock(&kvm->mmu_lock);
1498*4882a593Smuzhiyun }
1499*4882a593Smuzhiyun 
1500*4882a593Smuzhiyun /*
1501*4882a593Smuzhiyun  * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
1502*4882a593Smuzhiyun  *
1503*4882a593Smuzhiyun  * Main problems:
1504*4882a593Smuzhiyun  * - S/W ops are local to a CPU (not broadcast)
1505*4882a593Smuzhiyun  * - We have line migration behind our back (speculation)
1506*4882a593Smuzhiyun  * - System caches don't support S/W at all (damn!)
1507*4882a593Smuzhiyun  *
1508*4882a593Smuzhiyun  * In the face of the above, the best we can do is to try and convert
1509*4882a593Smuzhiyun  * S/W ops to VA ops. Because the guest is not allowed to infer the
1510*4882a593Smuzhiyun  * S/W to PA mapping, it can only use S/W to nuke the whole cache,
1511*4882a593Smuzhiyun  * which is a rather good thing for us.
1512*4882a593Smuzhiyun  *
1513*4882a593Smuzhiyun  * Also, it is only used when turning caches on/off ("The expected
1514*4882a593Smuzhiyun  * usage of the cache maintenance instructions that operate by set/way
1515*4882a593Smuzhiyun  * is associated with the cache maintenance instructions associated
1516*4882a593Smuzhiyun  * with the powerdown and powerup of caches, if this is required by
1517*4882a593Smuzhiyun  * the implementation.").
1518*4882a593Smuzhiyun  *
1519*4882a593Smuzhiyun  * We use the following policy:
1520*4882a593Smuzhiyun  *
1521*4882a593Smuzhiyun  * - If we trap a S/W operation, we enable VM trapping to detect
1522*4882a593Smuzhiyun  *   caches being turned on/off, and do a full clean.
1523*4882a593Smuzhiyun  *
1524*4882a593Smuzhiyun  * - We flush the caches on both caches being turned on and off.
1525*4882a593Smuzhiyun  *
1526*4882a593Smuzhiyun  * - Once the caches are enabled, we stop trapping VM ops.
1527*4882a593Smuzhiyun  */
kvm_set_way_flush(struct kvm_vcpu * vcpu)1528*4882a593Smuzhiyun void kvm_set_way_flush(struct kvm_vcpu *vcpu)
1529*4882a593Smuzhiyun {
1530*4882a593Smuzhiyun 	unsigned long hcr = *vcpu_hcr(vcpu);
1531*4882a593Smuzhiyun 
1532*4882a593Smuzhiyun 	/*
1533*4882a593Smuzhiyun 	 * If this is the first time we do a S/W operation
1534*4882a593Smuzhiyun 	 * (i.e. HCR_TVM not set) flush the whole memory, and set the
1535*4882a593Smuzhiyun 	 * VM trapping.
1536*4882a593Smuzhiyun 	 *
1537*4882a593Smuzhiyun 	 * Otherwise, rely on the VM trapping to wait for the MMU +
1538*4882a593Smuzhiyun 	 * Caches to be turned off. At that point, we'll be able to
1539*4882a593Smuzhiyun 	 * clean the caches again.
1540*4882a593Smuzhiyun 	 */
1541*4882a593Smuzhiyun 	if (!(hcr & HCR_TVM)) {
1542*4882a593Smuzhiyun 		trace_kvm_set_way_flush(*vcpu_pc(vcpu),
1543*4882a593Smuzhiyun 					vcpu_has_cache_enabled(vcpu));
1544*4882a593Smuzhiyun 		stage2_flush_vm(vcpu->kvm);
1545*4882a593Smuzhiyun 		*vcpu_hcr(vcpu) = hcr | HCR_TVM;
1546*4882a593Smuzhiyun 	}
1547*4882a593Smuzhiyun }
1548*4882a593Smuzhiyun 
kvm_toggle_cache(struct kvm_vcpu * vcpu,bool was_enabled)1549*4882a593Smuzhiyun void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
1550*4882a593Smuzhiyun {
1551*4882a593Smuzhiyun 	bool now_enabled = vcpu_has_cache_enabled(vcpu);
1552*4882a593Smuzhiyun 
1553*4882a593Smuzhiyun 	/*
1554*4882a593Smuzhiyun 	 * If switching the MMU+caches on, need to invalidate the caches.
1555*4882a593Smuzhiyun 	 * If switching it off, need to clean the caches.
1556*4882a593Smuzhiyun 	 * Clean + invalidate does the trick always.
1557*4882a593Smuzhiyun 	 */
1558*4882a593Smuzhiyun 	if (now_enabled != was_enabled)
1559*4882a593Smuzhiyun 		stage2_flush_vm(vcpu->kvm);
1560*4882a593Smuzhiyun 
1561*4882a593Smuzhiyun 	/* Caches are now on, stop trapping VM ops (until a S/W op) */
1562*4882a593Smuzhiyun 	if (now_enabled)
1563*4882a593Smuzhiyun 		*vcpu_hcr(vcpu) &= ~HCR_TVM;
1564*4882a593Smuzhiyun 
1565*4882a593Smuzhiyun 	trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
1566*4882a593Smuzhiyun }
1567