xref: /OK3568_Linux_fs/kernel/arch/mips/kvm/mmu.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS MMU handling in the KVM module.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels
 * for which pages need to be cached.
 */
#if defined(__PAGETABLE_PMD_FOLDED)
#define KVM_MMU_CACHE_MIN_PAGES 1
#else
#define KVM_MMU_CACHE_MIN_PAGES 2
#endif

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

/**
 * kvm_pgd_init() - Initialise KVM GPA page directory.
 * @page:	Pointer to page directory (PGD) for KVM GPA.
 *
 * Initialise a KVM GPA page directory with pointers to the invalid table, i.e.
 * representing no mappings. This is similar to pgd_init(), however it
 * initialises all the page directory pointers, not just the ones corresponding
 * to the userland address space (since it is for the guest physical address
 * space rather than a virtual address space).
 */
static void kvm_pgd_init(void *page)
{
	unsigned long *p, *end;
	unsigned long entry;

#ifdef __PAGETABLE_PMD_FOLDED
	entry = (unsigned long)invalid_pte_table;
#else
	entry = (unsigned long)invalid_pmd_table;
#endif

	p = (unsigned long *)page;
	end = p + PTRS_PER_PGD;

	do {
		p[0] = entry;
		p[1] = entry;
		p[2] = entry;
		p[3] = entry;
		p[4] = entry;
		p += 8;
		p[-3] = entry;
		p[-2] = entry;
		p[-1] = entry;
	} while (p != end);
}
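
/*
 * For reference, the unrolled loop above writes eight PGD slots per
 * iteration (indices 0-4 before the pointer bump, -3..-1 after it). A
 * minimal, unoptimised equivalent -- purely illustrative, the helper name
 * is hypothetical and this block is not compiled into the file:
 */
#if 0
static void kvm_pgd_init_simple(void *page)
{
	unsigned long *p = page;
	int i;

	for (i = 0; i < PTRS_PER_PGD; i++)
		p[i] = (unsigned long)invalid_pmd_table; /* invalid_pte_table if PMD folded */
}
#endif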

/**
 * kvm_pgd_alloc() - Allocate and initialise a KVM GPA page directory.
 *
 * Allocate a blank KVM GPA page directory (PGD) for representing guest physical
 * to host physical page mappings.
 *
 * Returns:	Pointer to new KVM GPA page directory.
 *		NULL on allocation failure.
 */
pgd_t *kvm_pgd_alloc(void)
{
	pgd_t *ret;

	ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_ORDER);
	if (ret)
		kvm_pgd_init(ret);

	return ret;
}

/**
 * kvm_mips_walk_pgd() - Walk page table with optional allocation.
 * @pgd:	Page directory pointer.
 * @addr:	Address to index page table using.
 * @cache:	MMU page cache to allocate new page tables from, or NULL.
 *
 * Walk the page tables pointed to by @pgd to find the PTE corresponding to the
 * address @addr. If page tables don't exist for @addr, they will be created
 * from the MMU cache if @cache is not NULL.
 *
 * Returns:	Pointer to pte_t corresponding to @addr.
 *		NULL if a page table doesn't exist for @addr and !@cache.
 *		NULL if a page table allocation failed.
 */
static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
				unsigned long addr)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd += pgd_index(addr);
	if (pgd_none(*pgd)) {
		/* Not used on MIPS yet */
		BUG();
		return NULL;
	}
	p4d = p4d_offset(pgd, addr);
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		pmd_t *new_pmd;

		if (!cache)
			return NULL;
		new_pmd = kvm_mmu_memory_cache_alloc(cache);
		pmd_init((unsigned long)new_pmd,
			 (unsigned long)invalid_pte_table);
		pud_populate(NULL, pud, new_pmd);
	}
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		pte_t *new_pte;

		if (!cache)
			return NULL;
		new_pte = kvm_mmu_memory_cache_alloc(cache);
		clear_page(new_pte);
		pmd_populate_kernel(NULL, pmd, new_pte);
	}
	return pte_offset_kernel(pmd, addr);
}

/* Caller must hold kvm->mmu_lock */
static pte_t *kvm_mips_pte_for_gpa(struct kvm *kvm,
				   struct kvm_mmu_memory_cache *cache,
				   unsigned long addr)
{
	return kvm_mips_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr);
}
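
/*
 * Illustrative sketch of the intended calling pattern for the helpers
 * above (the variables kvm, memcache, gpa, pfn and prot_bits are
 * hypothetical and assume a topped-up memory cache); not code from this
 * file:
 */
#if 0
	spin_lock(&kvm->mmu_lock);
	ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa); /* may allocate levels */
	if (ptep)
		set_pte(ptep, pfn_pte(pfn, __pgprot(prot_bits)));
	spin_unlock(&kvm->mmu_lock);
#endif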

/*
 * kvm_mips_flush_gpa_{pte,pmd,pud,pgd,pt}.
 * Flush a range of guest physical address space from the VM's GPA page tables.
 */

static bool kvm_mips_flush_gpa_pte(pte_t *pte, unsigned long start_gpa,
				   unsigned long end_gpa)
{
	int i_min = pte_index(start_gpa);
	int i_max = pte_index(end_gpa);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
	int i;

	for (i = i_min; i <= i_max; ++i) {
		if (!pte_present(pte[i]))
			continue;

		set_pte(pte + i, __pte(0));
	}
	return safe_to_remove;
}

static bool kvm_mips_flush_gpa_pmd(pmd_t *pmd, unsigned long start_gpa,
				   unsigned long end_gpa)
{
	pte_t *pte;
	unsigned long end = ~0ul;
	int i_min = pmd_index(start_gpa);
	int i_max = pmd_index(end_gpa);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
		if (!pmd_present(pmd[i]))
			continue;

		pte = pte_offset_kernel(pmd + i, 0);
		if (i == i_max)
			end = end_gpa;

		if (kvm_mips_flush_gpa_pte(pte, start_gpa, end)) {
			pmd_clear(pmd + i);
			pte_free_kernel(NULL, pte);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

static bool kvm_mips_flush_gpa_pud(pud_t *pud, unsigned long start_gpa,
				   unsigned long end_gpa)
{
	pmd_t *pmd;
	unsigned long end = ~0ul;
	int i_min = pud_index(start_gpa);
	int i_max = pud_index(end_gpa);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
		if (!pud_present(pud[i]))
			continue;

		pmd = pmd_offset(pud + i, 0);
		if (i == i_max)
			end = end_gpa;

		if (kvm_mips_flush_gpa_pmd(pmd, start_gpa, end)) {
			pud_clear(pud + i);
			pmd_free(NULL, pmd);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

static bool kvm_mips_flush_gpa_pgd(pgd_t *pgd, unsigned long start_gpa,
				   unsigned long end_gpa)
{
	p4d_t *p4d;
	pud_t *pud;
	unsigned long end = ~0ul;
	int i_min = pgd_index(start_gpa);
	int i_max = pgd_index(end_gpa);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
		if (!pgd_present(pgd[i]))
			continue;

		p4d = p4d_offset(pgd, 0);
		pud = pud_offset(p4d + i, 0);
		if (i == i_max)
			end = end_gpa;

		if (kvm_mips_flush_gpa_pud(pud, start_gpa, end)) {
			pgd_clear(pgd + i);
			pud_free(NULL, pud);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

/**
 * kvm_mips_flush_gpa_pt() - Flush a range of guest physical addresses.
 * @kvm:	KVM pointer.
 * @start_gfn:	Guest frame number of first page in GPA range to flush.
 * @end_gfn:	Guest frame number of last page in GPA range to flush.
 *
 * Flushes a range of GPA mappings from the GPA page tables.
 *
 * The caller must hold the @kvm->mmu_lock spinlock.
 *
 * Returns:	Whether it's safe to remove the top level page directory because
 *		all lower levels have been removed.
 */
bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
{
	return kvm_mips_flush_gpa_pgd(kvm->arch.gpa_mm.pgd,
				      start_gfn << PAGE_SHIFT,
				      end_gfn << PAGE_SHIFT);
}
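
/*
 * Illustrative sketch: how a caller might drop every GPA mapping for a
 * memslot (the gfn range is inclusive at both ends). The variables kvm
 * and slot are hypothetical; not code from this file:
 */
#if 0
	spin_lock(&kvm->mmu_lock);
	kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
			      slot->base_gfn + slot->npages - 1);
	spin_unlock(&kvm->mmu_lock);
#endif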

#define BUILD_PTE_RANGE_OP(name, op)					\
static int kvm_mips_##name##_pte(pte_t *pte, unsigned long start,	\
				 unsigned long end)			\
{									\
	int ret = 0;							\
	int i_min = pte_index(start);					\
	int i_max = pte_index(end);					\
	int i;								\
	pte_t old, new;							\
									\
	for (i = i_min; i <= i_max; ++i) {				\
		if (!pte_present(pte[i]))				\
			continue;					\
									\
		old = pte[i];						\
		new = op(old);						\
		if (pte_val(new) == pte_val(old))			\
			continue;					\
		set_pte(pte + i, new);					\
		ret = 1;						\
	}								\
	return ret;							\
}									\
									\
/* returns true if anything was done */					\
static int kvm_mips_##name##_pmd(pmd_t *pmd, unsigned long start,	\
				 unsigned long end)			\
{									\
	int ret = 0;							\
	pte_t *pte;							\
	unsigned long cur_end = ~0ul;					\
	int i_min = pmd_index(start);					\
	int i_max = pmd_index(end);					\
	int i;								\
									\
	for (i = i_min; i <= i_max; ++i, start = 0) {			\
		if (!pmd_present(pmd[i]))				\
			continue;					\
									\
		pte = pte_offset_kernel(pmd + i, 0);			\
		if (i == i_max)						\
			cur_end = end;					\
									\
		ret |= kvm_mips_##name##_pte(pte, start, cur_end);	\
	}								\
	return ret;							\
}									\
									\
static int kvm_mips_##name##_pud(pud_t *pud, unsigned long start,	\
				 unsigned long end)			\
{									\
	int ret = 0;							\
	pmd_t *pmd;							\
	unsigned long cur_end = ~0ul;					\
	int i_min = pud_index(start);					\
	int i_max = pud_index(end);					\
	int i;								\
									\
	for (i = i_min; i <= i_max; ++i, start = 0) {			\
		if (!pud_present(pud[i]))				\
			continue;					\
									\
		pmd = pmd_offset(pud + i, 0);				\
		if (i == i_max)						\
			cur_end = end;					\
									\
		ret |= kvm_mips_##name##_pmd(pmd, start, cur_end);	\
	}								\
	return ret;							\
}									\
									\
static int kvm_mips_##name##_pgd(pgd_t *pgd, unsigned long start,	\
				 unsigned long end)			\
{									\
	int ret = 0;							\
	p4d_t *p4d;							\
	pud_t *pud;							\
	unsigned long cur_end = ~0ul;					\
	int i_min = pgd_index(start);					\
	int i_max = pgd_index(end);					\
	int i;								\
									\
	for (i = i_min; i <= i_max; ++i, start = 0) {			\
		if (!pgd_present(pgd[i]))				\
			continue;					\
									\
		p4d = p4d_offset(pgd, 0);				\
		pud = pud_offset(p4d + i, 0);				\
		if (i == i_max)						\
			cur_end = end;					\
									\
		ret |= kvm_mips_##name##_pud(pud, start, cur_end);	\
	}								\
	return ret;							\
}
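
/*
 * Illustrative note: instantiating BUILD_PTE_RANGE_OP(mkclean, pte_mkclean)
 * as done below pastes out kvm_mips_mkclean_{pte,pmd,pud,pgd}(). The
 * generated leaf walker behaves like this sketch (not real code from this
 * file):
 */
#if 0
static int kvm_mips_mkclean_pte(pte_t *pte, unsigned long start,
				unsigned long end)
{
	int ret = 0, i;

	for (i = pte_index(start); i <= pte_index(end); ++i) {
		if (!pte_present(pte[i]) ||
		    pte_val(pte_mkclean(pte[i])) == pte_val(pte[i]))
			continue;
		set_pte(pte + i, pte_mkclean(pte[i]));
		ret = 1;	/* something changed; caller must flush */
	}
	return ret;
}
#endif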

/*
 * kvm_mips_mkclean_gpa_pt.
 * Mark a range of guest physical address space clean (writes fault) in the
 * VM's GPA page table to allow dirty page tracking.
 */

BUILD_PTE_RANGE_OP(mkclean, pte_mkclean)

/**
 * kvm_mips_mkclean_gpa_pt() - Make a range of guest physical addresses clean.
 * @kvm:	KVM pointer.
 * @start_gfn:	Guest frame number of first page in GPA range to flush.
 * @end_gfn:	Guest frame number of last page in GPA range to flush.
 *
 * Make a range of GPA mappings clean so that guest writes will fault and
 * trigger dirty page logging.
 *
 * The caller must hold the @kvm->mmu_lock spinlock.
 *
 * Returns:	Whether any GPA mappings were modified, which would require
 *		derived mappings (GVA page tables & TLB entries) to be
 *		invalidated.
 */
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
{
	return kvm_mips_mkclean_pgd(kvm->arch.gpa_mm.pgd,
				    start_gfn << PAGE_SHIFT,
				    end_gfn << PAGE_SHIFT);
}

/**
 * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages
 * @kvm:	The KVM pointer
 * @slot:	The memory slot associated with mask
 * @gfn_offset:	The gfn offset in memory slot
 * @mask:	The mask of dirty pages at offset 'gfn_offset' in this memory
 *		slot to be write protected
 *
 * Walks the bits set in @mask and write-protects the associated PTEs. The
 * caller must acquire @kvm->mmu_lock.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		gfn_t gfn_offset, unsigned long mask)
{
	gfn_t base_gfn = slot->base_gfn + gfn_offset;
	gfn_t start = base_gfn + __ffs(mask);
	gfn_t end = base_gfn + __fls(mask);

	kvm_mips_mkclean_gpa_pt(kvm, start, end);
}
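
/*
 * Worked example (illustrative): with mask == 0b00011010, bits 1, 3 and 4
 * are set, so __ffs(mask) == 1 and __fls(mask) == 4, and the inclusive gfn
 * range passed to kvm_mips_mkclean_gpa_pt() is [base_gfn + 1, base_gfn + 4].
 * The range is contiguous even when the mask has holes; the unset page at
 * bit 2 is cleaned too, which is harmless over-protection.
 */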

/*
 * kvm_mips_mkold_gpa_pt.
 * Mark a range of guest physical address space old (all accesses fault) in the
 * VM's GPA page table to allow detection of commonly used pages.
 */

BUILD_PTE_RANGE_OP(mkold, pte_mkold)

static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn,
				 gfn_t end_gfn)
{
	return kvm_mips_mkold_pgd(kvm->arch.gpa_mm.pgd,
				  start_gfn << PAGE_SHIFT,
				  end_gfn << PAGE_SHIFT);
}

static int handle_hva_to_gpa(struct kvm *kvm,
			     unsigned long start,
			     unsigned long end,
			     int (*handler)(struct kvm *kvm, gfn_t gfn,
					    gpa_t gfn_end,
					    struct kvm_memory_slot *memslot,
					    void *data),
			     void *data)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int ret = 0;

	slots = kvm_memslots(kvm);

	/* we only care about the pages that the guest sees */
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;

		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		ret |= handler(kvm, gfn, gfn_end, memslot, data);
	}

	return ret;
}
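
/*
 * Illustrative sketch of the handler contract: handle_hva_to_gpa() clamps
 * [start, end) to each memslot and hands the resulting gfn range to the
 * callback, OR-ing the return values, so any nonzero return tells the
 * caller that derived mappings need flushing. A hypothetical no-op
 * callback (not code from this file):
 */
#if 0
static int example_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
			   struct kvm_memory_slot *memslot, void *data)
{
	return 0;	/* return nonzero if any mapping was changed */
}
#endif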

static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
				 struct kvm_memory_slot *memslot, void *data)
{
	kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end);
	return 1;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
			unsigned flags)
{
	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);

	kvm_mips_callbacks->flush_shadow_all(kvm);
	return 0;
}

static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
				struct kvm_memory_slot *memslot, void *data)
{
	gpa_t gpa = gfn << PAGE_SHIFT;
	pte_t hva_pte = *(pte_t *)data;
	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
	pte_t old_pte;

	if (!gpa_pte)
		return 0;

	/* Mapping may need adjusting depending on memslot flags */
	old_pte = *gpa_pte;
	if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
		hva_pte = pte_mkclean(hva_pte);
	else if (memslot->flags & KVM_MEM_READONLY)
		hva_pte = pte_wrprotect(hva_pte);

	set_pte(gpa_pte, hva_pte);

	/* Replacing an absent or old page doesn't need flushes */
	if (!pte_present(old_pte) || !pte_young(old_pte))
		return 0;

	/* Pages swapped, aged, moved, or cleaned require flushes */
	return !pte_present(hva_pte) ||
	       !pte_young(hva_pte) ||
	       pte_pfn(old_pte) != pte_pfn(hva_pte) ||
	       (pte_dirty(old_pte) && !pte_dirty(hva_pte));
}

int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	unsigned long end = hva + PAGE_SIZE;
	int ret;

	ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
	if (ret)
		kvm_mips_callbacks->flush_shadow_all(kvm);
	return 0;
}

static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
			       struct kvm_memory_slot *memslot, void *data)
{
	return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end);
}

static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
				    struct kvm_memory_slot *memslot, void *data)
{
	gpa_t gpa = gfn << PAGE_SHIFT;
	pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);

	if (!gpa_pte)
		return 0;
	return pte_young(*gpa_pte);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
}

/**
 * _kvm_mips_map_page_fast() - Fast path GPA fault handler.
 * @vcpu:		VCPU pointer.
 * @gpa:		Guest physical address of fault.
 * @write_fault:	Whether the fault was due to a write.
 * @out_entry:		New PTE for @gpa (written on success unless NULL).
 * @out_buddy:		New PTE for @gpa's buddy (written on success unless
 *			NULL).
 *
 * Perform fast path GPA fault handling, doing all that can be done without
 * calling into KVM. This handles marking old pages young (for idle page
 * tracking), and dirtying of clean pages (for dirty page logging).
 *
 * Returns:	0 on success, in which case we can update derived mappings and
 *		resume guest execution.
 *		-EFAULT on failure due to absent GPA mapping or write to
 *		read-only page, in which case KVM must be consulted.
 */
static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
				   bool write_fault,
				   pte_t *out_entry, pte_t *out_buddy)
{
	struct kvm *kvm = vcpu->kvm;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	pte_t *ptep;
	kvm_pfn_t pfn = 0;	/* silence bogus GCC warning */
	bool pfn_valid = false;
	int ret = 0;

	spin_lock(&kvm->mmu_lock);

	/* Fast path - just check GPA page table for an existing entry */
	ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
	if (!ptep || !pte_present(*ptep)) {
		ret = -EFAULT;
		goto out;
	}

	/* Track access to pages marked old */
	if (!pte_young(*ptep)) {
		set_pte(ptep, pte_mkyoung(*ptep));
		pfn = pte_pfn(*ptep);
		pfn_valid = true;
		/* call kvm_set_pfn_accessed() after unlock */
	}
	if (write_fault && !pte_dirty(*ptep)) {
		if (!pte_write(*ptep)) {
			ret = -EFAULT;
			goto out;
		}

		/* Track dirtying of writeable pages */
		set_pte(ptep, pte_mkdirty(*ptep));
		pfn = pte_pfn(*ptep);
		mark_page_dirty(kvm, gfn);
		kvm_set_pfn_dirty(pfn);
	}

	if (out_entry)
		*out_entry = *ptep;
	if (out_buddy)
		*out_buddy = *ptep_buddy(ptep);

out:
	spin_unlock(&kvm->mmu_lock);
	if (pfn_valid)
		kvm_set_pfn_accessed(pfn);
	return ret;
}

/**
 * kvm_mips_map_page() - Map a guest physical page.
 * @vcpu:		VCPU pointer.
 * @gpa:		Guest physical address of fault.
 * @write_fault:	Whether the fault was due to a write.
 * @out_entry:		New PTE for @gpa (written on success unless NULL).
 * @out_buddy:		New PTE for @gpa's buddy (written on success unless
 *			NULL).
 *
 * Handle GPA faults by creating a new GPA mapping (or updating an existing
 * one).
 *
 * This takes care of marking pages young or dirty (idle/dirty page tracking),
 * asking KVM for the corresponding PFN, and creating a mapping in the GPA page
 * tables. Derived mappings (GVA page tables and TLBs) must be handled by the
 * caller.
 *
 * Returns:	0 on success, in which case the caller may use the @out_entry
 *		and @out_buddy PTEs to update derived mappings and resume guest
 *		execution.
 *		-EFAULT if there is no memory region at @gpa or a write was
 *		attempted to a read-only memory region. This is usually handled
 *		as an MMIO access.
 */
static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
			     bool write_fault,
			     pte_t *out_entry, pte_t *out_buddy)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int srcu_idx, err;
	kvm_pfn_t pfn;
	pte_t *ptep, entry, old_pte;
	bool writeable;
	unsigned long prot_bits;
	unsigned long mmu_seq;

	/* Try the fast path to handle old / clean pages */
	srcu_idx = srcu_read_lock(&kvm->srcu);
	err = _kvm_mips_map_page_fast(vcpu, gpa, write_fault, out_entry,
				      out_buddy);
	if (!err)
		goto out;

	/* We need a minimum of cached pages ready for page table creation */
	err = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
	if (err)
		goto out;

retry:
	/*
	 * Used to check for invalidations in progress, of the pfn that is
	 * returned by gfn_to_pfn_prot() below.
	 */
	mmu_seq = kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq isn't reordered with PTE reads in
	 * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
	 * risk the page we get a reference to getting unmapped before we have a
	 * chance to grab the mmu_lock without mmu_notifier_retry() noticing.
	 *
	 * This smp_rmb() pairs with the effective smp_wmb() of the combination
	 * of the pte_unmap_unlock() after the PTE is zapped, and the
	 * spin_lock() in kvm_mmu_notifier_invalidate_<page|range_end>() before
	 * mmu_notifier_seq is incremented.
	 */
	smp_rmb();

	/* Slow path - ask KVM core whether we can access this GPA */
	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);
	if (is_error_noslot_pfn(pfn)) {
		err = -EFAULT;
		goto out;
	}

	spin_lock(&kvm->mmu_lock);
	/* Check if an invalidation has taken place since we got pfn */
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		/*
		 * This can happen when mappings are changed asynchronously, but
		 * also synchronously if a COW is triggered by
		 * gfn_to_pfn_prot().
		 */
		spin_unlock(&kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		goto retry;
	}

	/* Ensure page tables are allocated */
	ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa);

	/* Set up the PTE */
	prot_bits = _PAGE_PRESENT | __READABLE | _page_cachable_default;
	if (writeable) {
		prot_bits |= _PAGE_WRITE;
		if (write_fault) {
			prot_bits |= __WRITEABLE;
			mark_page_dirty(kvm, gfn);
			kvm_set_pfn_dirty(pfn);
		}
	}
	entry = pfn_pte(pfn, __pgprot(prot_bits));

	/* Write the PTE */
	old_pte = *ptep;
	set_pte(ptep, entry);

	err = 0;
	if (out_entry)
		*out_entry = *ptep;
	if (out_buddy)
		*out_buddy = *ptep_buddy(ptep);

	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	kvm_set_pfn_accessed(pfn);
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}
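
/*
 * Condensed sketch of the invalidation-race protocol used above
 * (illustrative only; the real flow is in kvm_mips_map_page()):
 */
#if 0
retry:
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();				/* order against notifier's seq bump */
	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);
	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq)) {	/* raced with an invalidation */
		spin_unlock(&kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		goto retry;
	}
	/* ... install the PTE while still holding mmu_lock ... */
#endif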

static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu,
					unsigned long addr)
{
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
	pgd_t *pgdp;
	int ret;

	/* We need a minimum of cached pages ready for page table creation */
	ret = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
	if (ret)
		return NULL;

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		pgdp = vcpu->arch.guest_kernel_mm.pgd;
	else
		pgdp = vcpu->arch.guest_user_mm.pgd;

	return kvm_mips_walk_pgd(pgdp, memcache, addr);
}

void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
				  bool user)
{
	pgd_t *pgdp;
	pte_t *ptep;

	addr &= PAGE_MASK << 1;

	pgdp = vcpu->arch.guest_kernel_mm.pgd;
	ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
	if (ptep) {
		ptep[0] = pfn_pte(0, __pgprot(0));
		ptep[1] = pfn_pte(0, __pgprot(0));
	}

	if (user) {
		pgdp = vcpu->arch.guest_user_mm.pgd;
		ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
		if (ptep) {
			ptep[0] = pfn_pte(0, __pgprot(0));
			ptep[1] = pfn_pte(0, __pgprot(0));
		}
	}
}
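
/*
 * Worked example (illustrative): with 4 KiB pages, PAGE_MASK << 1 clears
 * the low 13 bits, so addr &= PAGE_MASK << 1 rounds addr down to an
 * even/odd page pair -- the two PTEs that back one MIPS TLB entry
 * (EntryLo0/EntryLo1). That is why the helper above always writes both
 * ptep[0] and ptep[1].
 */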

/*
 * kvm_mips_flush_gva_{pte,pmd,pud,pgd,pt}.
 * Flush a range of guest virtual address space from the VM's GVA page tables.
 */

static bool kvm_mips_flush_gva_pte(pte_t *pte, unsigned long start_gva,
				   unsigned long end_gva)
{
	int i_min = pte_index(start_gva);
	int i_max = pte_index(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
	int i;

	/*
	 * There's no freeing to do, so there's no point clearing individual
	 * entries unless only part of the last level page table needs flushing.
	 */
	if (safe_to_remove)
		return true;

	for (i = i_min; i <= i_max; ++i) {
		if (!pte_present(pte[i]))
			continue;

		set_pte(pte + i, __pte(0));
	}
	return false;
}

static bool kvm_mips_flush_gva_pmd(pmd_t *pmd, unsigned long start_gva,
				   unsigned long end_gva)
{
	pte_t *pte;
	unsigned long end = ~0ul;
	int i_min = pmd_index(start_gva);
	int i_max = pmd_index(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gva = 0) {
		if (!pmd_present(pmd[i]))
			continue;

		pte = pte_offset_kernel(pmd + i, 0);
		if (i == i_max)
			end = end_gva;

		if (kvm_mips_flush_gva_pte(pte, start_gva, end)) {
			pmd_clear(pmd + i);
			pte_free_kernel(NULL, pte);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

static bool kvm_mips_flush_gva_pud(pud_t *pud, unsigned long start_gva,
				   unsigned long end_gva)
{
	pmd_t *pmd;
	unsigned long end = ~0ul;
	int i_min = pud_index(start_gva);
	int i_max = pud_index(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gva = 0) {
		if (!pud_present(pud[i]))
			continue;

		pmd = pmd_offset(pud + i, 0);
		if (i == i_max)
			end = end_gva;

		if (kvm_mips_flush_gva_pmd(pmd, start_gva, end)) {
			pud_clear(pud + i);
			pmd_free(NULL, pmd);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

static bool kvm_mips_flush_gva_pgd(pgd_t *pgd, unsigned long start_gva,
				   unsigned long end_gva)
{
	p4d_t *p4d;
	pud_t *pud;
	unsigned long end = ~0ul;
	int i_min = pgd_index(start_gva);
	int i_max = pgd_index(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gva = 0) {
		if (!pgd_present(pgd[i]))
			continue;

		p4d = p4d_offset(pgd, 0);
		pud = pud_offset(p4d + i, 0);
		if (i == i_max)
			end = end_gva;

		if (kvm_mips_flush_gva_pud(pud, start_gva, end)) {
			pgd_clear(pgd + i);
			pud_free(NULL, pud);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags)
{
	if (flags & KMF_GPA) {
		/* all of guest virtual address space could be affected */
		if (flags & KMF_KERN)
			/* useg, kseg0, seg2/3 */
			kvm_mips_flush_gva_pgd(pgd, 0, 0x7fffffff);
		else
			/* useg */
			kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);
	} else {
		/* useg */
		kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);

		/* kseg2/3 */
		if (flags & KMF_KERN)
			kvm_mips_flush_gva_pgd(pgd, 0x60000000, 0x7fffffff);
	}
}
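
/*
 * Illustrative note on the constants above (assuming the trap & emulate
 * GVA layout, where guest kseg0 is remapped below the host's kernel
 * segments): useg occupies 0x00000000-0x3fffffff, guest kseg0
 * 0x40000000-0x5fffffff and kseg2/3 0x60000000-0x7fffffff. Hence
 * KMF_GPA | KMF_KERN flushes the whole 0-0x7fffffff span, while flushes
 * without KMF_KERN only touch useg.
 */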

static pte_t kvm_mips_gpa_pte_to_gva_unmapped(pte_t pte)
{
	/*
	 * Don't leak writeable but clean entries from GPA page tables. We don't
	 * want the normal Linux tlbmod handler to handle dirtying when KVM
	 * accesses guest memory.
	 */
	if (!pte_dirty(pte))
		pte = pte_wrprotect(pte);

	return pte;
}

static pte_t kvm_mips_gpa_pte_to_gva_mapped(pte_t pte, long entrylo)
{
	/* Guest EntryLo overrides host EntryLo */
	if (!(entrylo & ENTRYLO_D))
		pte = pte_mkclean(pte);

	return kvm_mips_gpa_pte_to_gva_unmapped(pte);
}

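/*
 * Illustrative example of the combination above: a GPA PTE that is
 * writeable but not dirty, paired with a guest EntryLo whose ENTRYLO_D bit
 * is clear, is first pte_mkclean()ed and then pte_wrprotect()ed, so the
 * resulting GVA mapping is read-only and a guest write takes a TLB
 * modified fault that KVM can use for dirty tracking.
 */
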
#ifdef CONFIG_KVM_MIPS_VZ
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
				      struct kvm_vcpu *vcpu,
				      bool write_fault)
{
	int ret;

	ret = kvm_mips_map_page(vcpu, badvaddr, write_fault, NULL, NULL);
	if (ret)
		return ret;

	/* Invalidate this entry in the TLB */
	return kvm_vz_host_tlb_inv(vcpu, badvaddr);
}
#endif

/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu,
				    bool write_fault)
{
	unsigned long gpa;
	pte_t pte_gpa[2], *ptep_gva;
	int idx;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	/* Get the GPA page table entry */
	gpa = KVM_GUEST_CPHYSADDR(badvaddr);
	idx = (badvaddr >> PAGE_SHIFT) & 1;
	if (kvm_mips_map_page(vcpu, gpa, write_fault, &pte_gpa[idx],
			      &pte_gpa[!idx]) < 0)
		return -1;

	/* Get the GVA page table entry */
	ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, badvaddr & ~PAGE_SIZE);
	if (!ptep_gva) {
		kvm_err("No ptep for gva %lx\n", badvaddr);
		return -1;
	}

	/* Copy a pair of entries from GPA page table to GVA page table */
	ptep_gva[0] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[0]);
	ptep_gva[1] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[1]);

	/* Invalidate this entry in the TLB, guest kernel ASID only */
	kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
	return 0;
}

int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb,
					 unsigned long gva,
					 bool write_fault)
{
	struct kvm *kvm = vcpu->kvm;
	long tlb_lo[2];
	pte_t pte_gpa[2], *ptep_buddy, *ptep_gva;
	unsigned int idx = TLB_LO_IDX(*tlb, gva);
	bool kernel = KVM_GUEST_KERNEL_MODE(vcpu);

	tlb_lo[0] = tlb->tlb_lo[0];
	tlb_lo[1] = tlb->tlb_lo[1];

	/*
	 * The commpage address must not be mapped to anything else if the guest
	 * TLB contains entries nearby, or commpage accesses will break.
	 */
	if (!((gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1)))
		tlb_lo[TLB_LO_IDX(*tlb, KVM_GUEST_COMMPAGE_ADDR)] = 0;

	/* Get the GPA page table entry */
	if (kvm_mips_map_page(vcpu, mips3_tlbpfn_to_paddr(tlb_lo[idx]),
			      write_fault, &pte_gpa[idx], NULL) < 0)
		return -1;

	/* And its GVA buddy's GPA page table entry if it also exists */
	pte_gpa[!idx] = pfn_pte(0, __pgprot(0));
	if (tlb_lo[!idx] & ENTRYLO_V) {
		spin_lock(&kvm->mmu_lock);
		ptep_buddy = kvm_mips_pte_for_gpa(kvm, NULL,
					mips3_tlbpfn_to_paddr(tlb_lo[!idx]));
		if (ptep_buddy)
			pte_gpa[!idx] = *ptep_buddy;
		spin_unlock(&kvm->mmu_lock);
	}

	/* Get the GVA page table entry pair */
	ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, gva & ~PAGE_SIZE);
	if (!ptep_gva) {
		kvm_err("No ptep for gva %lx\n", gva);
		return -1;
	}

	/* Copy a pair of entries from GPA page table to GVA page table */
	ptep_gva[0] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[0], tlb_lo[0]);
	ptep_gva[1] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[1], tlb_lo[1]);

	/* Invalidate this entry in the TLB, current guest mode ASID only */
	kvm_mips_host_tlb_inv(vcpu, gva, !kernel, kernel);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo[0], tlb->tlb_lo[1]);

	return 0;
}
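
/*
 * Worked example (illustrative): with 4 KiB pages the commpage check above
 * masks off the low 13 bits, i.e. the even/odd page pair covered by one
 * TLB entry. The xor is zero in the remaining VPN2 bits exactly when @gva
 * and KVM_GUEST_COMMPAGE_ADDR fall in the same page pair, in which case
 * the guest TLB half that would alias the commpage is forced invalid
 * (tlb_lo = 0) before the mapping is built.
 */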

int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
				       struct kvm_vcpu *vcpu)
{
	kvm_pfn_t pfn;
	pte_t *ptep;

	ptep = kvm_trap_emul_pte_for_gva(vcpu, badvaddr);
	if (!ptep) {
		kvm_err("No ptep for commpage %lx\n", badvaddr);
		return -1;
	}

	pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage));
	/* Also set valid and dirty, so refill handler doesn't have to */
	*ptep = pte_mkyoung(pte_mkdirty(pfn_pte(pfn, PAGE_SHARED)));

	/* Invalidate this entry in the TLB, guest kernel ASID only */
	kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
	return 0;
}

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	local_irq_save(flags);

	vcpu->cpu = cpu;
	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	/* restore guest state to registers */
	kvm_mips_callbacks->vcpu_load(vcpu, cpu);

	local_irq_restore(flags);
}

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();
	vcpu->arch.last_sched_cpu = cpu;
	vcpu->cpu = -1;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_put(vcpu, cpu);

	local_irq_restore(flags);
}

/**
 * kvm_trap_emul_gva_fault() - Safely attempt to handle a GVA access fault.
 * @vcpu:	Virtual CPU.
 * @gva:	Guest virtual address to be accessed.
 * @write:	True if write attempted (must be dirtied and made writable).
 *
 * Safely attempt to handle a GVA fault, mapping GVA pages if necessary, and
 * dirtying the page if @write so that guest instructions can be modified.
 *
 * Returns:	KVM_MIPS_MAPPED on success.
 *		KVM_MIPS_GVA if bad guest virtual address.
 *		KVM_MIPS_GPA if bad guest physical address.
 *		KVM_MIPS_TLB if guest TLB not present.
 *		KVM_MIPS_TLBINV if guest TLB present but not valid.
 *		KVM_MIPS_TLBMOD if guest TLB read only.
 */
enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
						   unsigned long gva,
						   bool write)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb *tlb;
	int index;

	if (KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG0) {
		if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu, write) < 0)
			return KVM_MIPS_GPA;
	} else if ((KVM_GUEST_KSEGX(gva) < KVM_GUEST_KSEG0) ||
		   KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG23) {
		/* Address should be in the guest TLB */
		index = kvm_mips_guest_tlb_lookup(vcpu, (gva & VPN2_MASK) |
			  (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID));
		if (index < 0)
			return KVM_MIPS_TLB;
		tlb = &vcpu->arch.guest_tlb[index];

		/* Entry should be valid, and dirty for writes */
		if (!TLB_IS_VALID(*tlb, gva))
			return KVM_MIPS_TLBINV;
		if (write && !TLB_IS_DIRTY(*tlb, gva))
			return KVM_MIPS_TLBMOD;

		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva, write))
			return KVM_MIPS_GPA;
	} else {
		return KVM_MIPS_GVA;
	}

	return KVM_MIPS_MAPPED;
}

int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
	int err;

	if (WARN(IS_ENABLED(CONFIG_KVM_MIPS_VZ),
		 "Expect BadInstr/BadInstrP registers to be used with VZ\n"))
		return -EINVAL;

retry:
	kvm_trap_emul_gva_lockless_begin(vcpu);
	err = get_user(*out, opc);
	kvm_trap_emul_gva_lockless_end(vcpu);

	if (unlikely(err)) {
		/*
		 * Try to handle the fault, maybe we just raced with a GVA
		 * invalidation.
		 */
		err = kvm_trap_emul_gva_fault(vcpu, (unsigned long)opc,
					      false);
		if (unlikely(err)) {
			kvm_err("%s: illegal address: %p\n",
				__func__, opc);
			return -EFAULT;
		}

		/* Hopefully it'll work now */
		goto retry;
	}
	return 0;
}