// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *         Alexander Graf, agraf@suse.de
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched/mm.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>
#include <asm/pte-walk.h>

#include "e500.h"
#include "timing.h"
#include "e500_mmu_host.h"

#include "trace_booke.h"

#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)

static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];

static inline unsigned int tlb1_max_shadow_size(void)
{
	/* reserve one entry for magic page */
	return host_tlb_params[1].entries - tlbcam_index - 1;
}

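/*
 * Compute the shadow (host) MAS3 permission bits from the guest's MAS3
 * value.  Without CONFIG_KVM_BOOKE_HV the guest runs unprivileged on
 * the host, so guest supervisor permissions are mirrored into the user
 * permission bits, and the supervisor bits are always set.
 */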
static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

#ifndef CONFIG_KVM_BOOKE_HV
	if (!usermode) {
		/*
		 * Guest is in supervisor mode, so we need to translate
		 * guest supervisor permissions into user permissions.
		 */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}
	mas3 |= E500_TLB_SUPER_PERM_MASK;
#endif
	return mas3;
}

/*
 * Write a shadow TLB entry into the host TLB.
 */
static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
				     uint32_t mas0,
				     uint32_t lpid)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS0, mas0);
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
	mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
	mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_MAS8, MAS8_TGS | get_thread_specific_lpid(lpid));
#endif
	asm volatile("isync; tlbwe" : : : "memory");

#ifdef CONFIG_KVM_BOOKE_HV
	/* Must clear mas8 for other host tlbwe's */
	mtspr(SPRN_MAS8, 0);
	isync();
#endif
	local_irq_restore(flags);

	trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
				      stlbe->mas2, stlbe->mas7_3);
}

/*
 * Acquire a mas0 with victim hint, as if we just took a TLB miss.
 *
 * We don't care about the address we're searching for, other than that it's
 * in the right set and is not present in the TLB.  Using a zero PID and a
 * userspace address means we don't have to set and then restore MAS5, or
 * calculate a proper MAS6 value.
 */
static u32 get_host_mas0(unsigned long eaddr)
{
	unsigned long flags;
	u32 mas0;
	u32 mas4;

	local_irq_save(flags);
	mtspr(SPRN_MAS6, 0);
	mas4 = mfspr(SPRN_MAS4);
	mtspr(SPRN_MAS4, mas4 & ~MAS4_TLBSEL_MASK);
	asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
	mas0 = mfspr(SPRN_MAS0);
	mtspr(SPRN_MAS4, mas4);
	local_irq_restore(flags);

	return mas0;
}

/* sesel is for tlb1 only */
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
{
	u32 mas0;

	if (tlbsel == 0) {
		mas0 = get_host_mas0(stlbe->mas2);
		__write_host_tlbe(stlbe, mas0, vcpu_e500->vcpu.kvm->arch.lpid);
	} else {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(1) |
				  MAS0_ESEL(to_htlb1_esel(sesel)),
				  vcpu_e500->vcpu.kvm->arch.lpid);
	}
}

/* sesel is for tlb1 only */
static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
			struct kvm_book3e_206_tlb_entry *gtlbe,
			struct kvm_book3e_206_tlb_entry *stlbe,
			int stlbsel, int sesel)
{
	int stid;

	preempt_disable();
	stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);

	stlbe->mas1 |= MAS1_TID(stid);
	write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
	preempt_enable();
}

#ifdef CONFIG_KVM_E500V2
/* XXX should be a hook in the gva2hpa translation */
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry magic;
	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
	unsigned int stid;
	kvm_pfn_t pfn;

	pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
	get_page(pfn_to_page(pfn));

	preempt_disable();
	stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

	magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
	magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
		       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
	magic.mas8 = 0;

	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index), 0);
	preempt_enable();
}
#endif

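/*
 * Invalidate any host TLB entries backing the given guest TLB entry
 * and clear its host-mapping state.  A guest TLB1 entry may be shadowed
 * by multiple host TLB1 entries (tracked in g2h_tlb1_map) or by host
 * TLB0 entries, each of which is handled separately below.
 */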
void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
			 int esel)
{
	struct kvm_book3e_206_tlb_entry *gtlbe =
		get_entry(vcpu_e500, tlbsel, esel);
	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;

	/* Don't bother with unmapped entries */
	if (!(ref->flags & E500_TLB_VALID)) {
		WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
		     "%s: flags %x\n", __func__, ref->flags);
		WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
	}

	if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
		u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
		int hw_tlb_indx;
		unsigned long flags;

		local_irq_save(flags);
		while (tmp) {
			hw_tlb_indx = __ilog2_u64(tmp & -tmp);
			mtspr(SPRN_MAS0,
			      MAS0_TLBSEL(1) |
			      MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
			mtspr(SPRN_MAS1, 0);
			asm volatile("tlbwe");
			vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
			tmp &= tmp - 1;
		}
		mb();
		vcpu_e500->g2h_tlb1_map[esel] = 0;
		ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
		local_irq_restore(flags);
	}

	if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) {
		/*
		 * TLB1 entry is backed by 4k pages. This should happen
		 * rarely and is not worth optimizing. Invalidate everything.
		 */
		kvmppc_e500_tlbil_all(vcpu_e500);
		ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
	}

	/*
	 * If TLB entry is still valid then it's a TLB0 entry, and thus
	 * backed by at most one host tlbe per shadow pid
	 */
	if (ref->flags & E500_TLB_VALID)
		kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);

	/* Mark the TLB as not backed by the host anymore */
	ref->flags = 0;
}

static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
}

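/*
 * Record the host pfn and WIMGE attributes backing a guest TLB entry,
 * marking the page accessed, and dirty if the guest mapping is
 * writable.
 */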
static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
					 struct kvm_book3e_206_tlb_entry *gtlbe,
					 kvm_pfn_t pfn, unsigned int wimg)
{
	ref->pfn = pfn;
	ref->flags = E500_TLB_VALID;

	/* Use guest supplied MAS2_G and MAS2_E */
	ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;

	/* Mark the page accessed */
	kvm_set_pfn_accessed(pfn);

	if (tlbe_is_writable(gtlbe))
		kvm_set_pfn_dirty(pfn);
}

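/* Drop a guest TLB entry's host-mapping state, if it has any. */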
static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
	if (ref->flags & E500_TLB_VALID) {
		/* FIXME: don't log bogus pfn for TLB1 */
		trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
		ref->flags = 0;
	}
}

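/* Forget all guest-to-host and host-to-guest TLB1 mapping state. */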
static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	if (vcpu_e500->g2h_tlb1_map)
		memset(vcpu_e500->g2h_tlb1_map, 0,
		       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
	if (vcpu_e500->h2g_tlb1_rmap)
		memset(vcpu_e500->h2g_tlb1_rmap, 0,
		       sizeof(unsigned int) * host_tlb_params[1].entries);
}

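/* Release the host-mapping state of every guest TLB entry. */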
static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int tlbsel;
	int i;

	for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
		for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
			struct tlbe_ref *ref =
				&vcpu_e500->gtlb_priv[tlbsel][i].ref;
			kvmppc_e500_ref_release(ref);
		}
	}
}

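/*
 * Invalidate all of this vcpu's shadow TLB entries and forget the
 * associated mapping state; pages are refaulted on demand.
 */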
void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	kvmppc_e500_tlbil_all(vcpu_e500);
	clear_tlb_privs(vcpu_e500);
	clear_tlb1_bitmap(vcpu_e500);
}

/* TID must be supplied by the caller */
static void kvmppc_e500_setup_stlbe(
	struct kvm_vcpu *vcpu,
	struct kvm_book3e_206_tlb_entry *gtlbe,
	int tsize, struct tlbe_ref *ref, u64 gvaddr,
	struct kvm_book3e_206_tlb_entry *stlbe)
{
	kvm_pfn_t pfn = ref->pfn;
	u32 pr = vcpu->arch.shared->msr & MSR_PR;

	BUG_ON(!(ref->flags & E500_TLB_VALID));

	/* Force IPROT=0 for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
	stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
			e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
}

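/*
 * Build a shadow TLB entry for the given guest TLB entry: translate the
 * guest physical address to a host physical address (choosing the
 * largest page size that host and guest mappings allow for TLB1), pick
 * up the WIMGE attributes from the host Linux PTE, and fill in *stlbe.
 * Returns 0 on success or a negative error if the page cannot be
 * mapped.
 */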
static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
	int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
	struct tlbe_ref *ref)
{
	struct kvm_memory_slot *slot;
	unsigned long pfn = 0; /* silence GCC warning */
	unsigned long hva;
	int pfnmap = 0;
	int tsize = BOOK3E_PAGESZ_4K;
	int ret = 0;
	unsigned long mmu_seq;
	struct kvm *kvm = vcpu_e500->vcpu.kvm;
	unsigned long tsize_pages = 0;
	pte_t *ptep;
	unsigned int wimg = 0;
	pgd_t *pgdir;
	unsigned long flags;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/*
	 * Translate guest physical to true physical, acquiring
	 * a page reference if it is normal, non-reserved memory.
	 *
	 * gfn_to_memslot() must succeed because otherwise we wouldn't
	 * have gotten this far.  Eventually we should just pass the slot
	 * pointer through from the first lookup.
	 */
	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
	hva = gfn_to_hva_memslot(slot, gfn);

	if (tlbsel == 1) {
		struct vm_area_struct *vma;
		mmap_read_lock(kvm->mm);

		vma = find_vma(kvm->mm, hva);
		if (vma && hva >= vma->vm_start &&
		    (vma->vm_flags & VM_PFNMAP)) {
			/*
			 * This VMA is a physically contiguous region (e.g.
			 * /dev/mem) that bypasses normal Linux page
			 * management.  Find the overlap between the
			 * vma and the memslot.
			 */

			unsigned long start, end;
			unsigned long slot_start, slot_end;

			pfnmap = 1;

			start = vma->vm_pgoff;
			end = start +
			      vma_pages(vma);

			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

			slot_start = pfn - (gfn - slot->base_gfn);
			slot_end = slot_start + slot->npages;

			if (start < slot_start)
				start = slot_start;
			if (end > slot_end)
				end = slot_end;

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

			/*
			 * Now find the largest tsize (up to what the guest
			 * requested) that will cover gfn, stay within the
			 * range, and for which gfn and pfn are mutually
			 * aligned.
			 */

			for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
				unsigned long gfn_start, gfn_end;
				tsize_pages = 1UL << (tsize - 2);

				gfn_start = gfn & ~(tsize_pages - 1);
				gfn_end = gfn_start + tsize_pages;

				if (gfn_start + pfn - gfn < start)
					continue;
				if (gfn_end + pfn - gfn > end)
					continue;
				if ((gfn & (tsize_pages - 1)) !=
				    (pfn & (tsize_pages - 1)))
					continue;

				gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
				pfn &= ~(tsize_pages - 1);
				break;
			}
		} else if (vma && hva >= vma->vm_start &&
			   is_vm_hugetlb_page(vma)) {
			unsigned long psize = vma_kernel_pagesize(vma);

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * Take the largest page size that satisfies both host
			 * and guest mapping
			 */
			tsize = min(__ilog2(psize) - 10, tsize);

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
		}

		mmap_read_unlock(kvm->mm);
	}

	if (likely(!pfnmap)) {
		tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT);
		pfn = gfn_to_pfn_memslot(slot, gfn);
		if (is_error_noslot_pfn(pfn)) {
			if (printk_ratelimit())
				pr_err("%s: real page not found for gfn %lx\n",
				       __func__, (long)gfn);
			return -EINVAL;
		}

		/* Align guest and physical address to page map boundaries */
		pfn &= ~(tsize_pages - 1);
		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
	}

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		ret = -EAGAIN;
		goto out;
	}

	pgdir = vcpu_e500->vcpu.arch.pgdir;
	/*
	 * We are just looking at the wimg bits, so we don't
	 * care much about the trans splitting bit.
	 * We are holding kvm->mmu_lock so a notifier invalidate
	 * can't run hence pfn won't change.
	 */
	local_irq_save(flags);
	ptep = find_linux_pte(pgdir, hva, NULL, NULL);
	if (ptep) {
		pte_t pte = READ_ONCE(*ptep);

		if (pte_present(pte)) {
			wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) &
				MAS2_WIMGE_MASK;
			local_irq_restore(flags);
		} else {
			local_irq_restore(flags);
			pr_err_ratelimited("%s: pte not present: gfn %lx,pfn %lx\n",
					   __func__, (long)gfn, pfn);
			ret = -EINVAL;
			goto out;
		}
	}
	kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);

	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
				ref, gvaddr, stlbe);

	/* Clear i-cache for new pages */
	kvmppc_mmu_flush_icache(pfn);

out:
	spin_unlock(&kvm->mmu_lock);

	/* Drop refcount on page, so that mmu notifiers can clear it */
	kvm_release_pfn_clean(pfn);

	return ret;
}

/* XXX only map the one-one case, for now use TLB0 */
static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel,
				struct kvm_book3e_206_tlb_entry *stlbe)
{
	struct kvm_book3e_206_tlb_entry *gtlbe;
	struct tlbe_ref *ref;
	int stlbsel = 0;
	int sesel = 0;
	int r;

	gtlbe = get_entry(vcpu_e500, 0, esel);
	ref = &vcpu_e500->gtlb_priv[0][esel].ref;

	r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
			gtlbe, 0, stlbe, ref);
	if (r)
		return r;

	write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);

	return 0;
}

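/*
 * Pick a host TLB1 slot for a shadow mapping, round-robin through
 * host_tlb1_nv.  Any guest mapping previously using that slot is
 * evicted from its g2h bitmap, then the new mapping is recorded in
 * both the g2h bitmap and the h2g reverse map.  Returns the slot.
 */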
static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
				     struct tlbe_ref *ref,
				     int esel)
{
	unsigned int sesel = vcpu_e500->host_tlb1_nv++;

	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
		vcpu_e500->host_tlb1_nv = 0;

	if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
	}

	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
	vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
	WARN_ON(!(ref->flags & E500_TLB_VALID));

	return sesel;
}

/*
 * Caller must ensure that the specified guest TLB entry is safe to insert
 * into the shadow TLB.
 */
/* For both one-one and one-to-many */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
	int sesel;
	int r;

	r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
				   ref);
	if (r)
		return r;

	/* Use TLB0 when we can only map a page with 4k */
	if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) {
		vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0;
		write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0);
		return 0;
	}

	/* Otherwise map into TLB1 */
	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
	write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);

	return 0;
}

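/*
 * Map the guest TLB entry selected by @index into the host TLB for the
 * faulting address @eaddr.  A still-valid TLB0 ref is reused directly;
 * otherwise the entry goes through the full shadow-mapping path.
 */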
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
		    unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe_priv *priv;
	struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);

	switch (tlbsel) {
	case 0:
		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];

		/* Triggers after clear_tlb_privs or on initial mapping */
		if (!(priv->ref.flags & E500_TLB_VALID)) {
			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
		} else {
			kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
						&priv->ref, eaddr, &stlbe);
			write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0);
		}
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;
		kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,
				     esel);
		break;
	}

	default:
		BUG();
		break;
	}
}

#ifdef CONFIG_KVM_BOOKE_HV
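/*
 * Fetch the instruction at the guest PC for emulation.  Searches the
 * host TLB for the guest PC's mapping, validates execute permission and
 * storage attributes, and reads the instruction through a temporary
 * kernel mapping.  Returns EMULATE_DONE on success or EMULATE_AGAIN if
 * the guest should simply retry.
 */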
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
		enum instruction_fetch_type type, u32 *instr)
{
	gva_t geaddr;
	hpa_t addr;
	hfn_t pfn;
	hva_t eaddr;
	u32 mas1, mas2, mas3;
	u64 mas7_mas3;
	struct page *page;
	unsigned int addr_space, psize_shift;
	bool pr;
	unsigned long flags;

	/* Search TLB for guest pc to get the real address */
	geaddr = kvmppc_get_pc(vcpu);

	addr_space = (vcpu->arch.shared->msr & MSR_IS) >> MSR_IR_LG;

	local_irq_save(flags);
	mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space);
	mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(vcpu));
	asm volatile("tlbsx 0, %[geaddr]\n" : :
		     [geaddr] "r" (geaddr));
	mtspr(SPRN_MAS5, 0);
	mtspr(SPRN_MAS8, 0);
	mas1 = mfspr(SPRN_MAS1);
	mas2 = mfspr(SPRN_MAS2);
	mas3 = mfspr(SPRN_MAS3);
#ifdef CONFIG_64BIT
	mas7_mas3 = mfspr(SPRN_MAS7_MAS3);
#else
	mas7_mas3 = ((u64)mfspr(SPRN_MAS7) << 32) | mas3;
#endif
	local_irq_restore(flags);

	/*
	 * If the TLB entry for the guest pc was evicted, return to the guest.
	 * A valid entry is likely to be found on the next attempt.
	 */
	if (!(mas1 & MAS1_VALID))
		return EMULATE_AGAIN;

	/*
	 * Another thread may rewrite the TLB entry in parallel, don't
	 * execute from the address if the execute permission is not set
	 */
	pr = vcpu->arch.shared->msr & MSR_PR;
	if (unlikely((pr && !(mas3 & MAS3_UX)) ||
		     (!pr && !(mas3 & MAS3_SX)))) {
		pr_err_ratelimited(
			"%s: Instruction emulation from guest address %08lx without execute permission\n",
			__func__, geaddr);
		return EMULATE_AGAIN;
	}

	/*
	 * The real address will be mapped by a cacheable, memory coherent,
	 * write-back page. Check for mismatches when LRAT is used.
	 */
	if (has_feature(vcpu, VCPU_FTR_MMU_V2) &&
	    unlikely((mas2 & MAS2_I) || (mas2 & MAS2_W) || !(mas2 & MAS2_M))) {
		pr_err_ratelimited(
			"%s: Instruction emulation from guest address %08lx mismatches storage attributes\n",
			__func__, geaddr);
		return EMULATE_AGAIN;
	}

	/* Get pfn */
	psize_shift = MAS1_GET_TSIZE(mas1) + 10;
	addr = (mas7_mas3 & (~0ULL << psize_shift)) |
	       (geaddr & ((1ULL << psize_shift) - 1ULL));
	pfn = addr >> PAGE_SHIFT;

	/* Guard against emulation from devices area */
	if (unlikely(!page_is_ram(pfn))) {
		pr_err_ratelimited("%s: Instruction emulation from non-RAM host address %08llx is not supported\n",
			 __func__, addr);
		return EMULATE_AGAIN;
	}

	/* Map a page and get guest's instruction */
	page = pfn_to_page(pfn);
	eaddr = (unsigned long)kmap_atomic(page);
	*instr = *(u32 *)(eaddr | (unsigned long)(addr & ~PAGE_MASK));
	kunmap_atomic((u32 *)eaddr);

	return EMULATE_DONE;
}
#else
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
		enum instruction_fetch_type type, u32 *instr)
{
	return EMULATE_AGAIN;
}
#endif

/************* MMU Notifiers *************/

static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	/*
	 * Flush all shadow tlb entries everywhere. This is slow, but
	 * it guarantees that the page being unmapped is caught.
	 */
	kvm_flush_remote_tlbs(kvm);

	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
			unsigned flags)
{
	/* kvm_unmap_hva flushes everything anyway */
	kvm_unmap_hva(kvm, start);

	return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	/* XXX could be more clever ;) */
	return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	kvm_unmap_hva(kvm, hva);
	return 0;
}

/*****************************************/

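/*
 * Read the host TLB geometry from the TLBnCFG SPRs, sanity-check it,
 * and allocate the host-to-guest TLB1 reverse map.
 */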
int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
	host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

	/*
	 * This should never happen on real e500 hardware, but is
	 * architecturally possible -- e.g. in some weird nested
	 * virtualization case.
	 */
	if (host_tlb_params[0].entries == 0 ||
	    host_tlb_params[1].entries == 0) {
		pr_err("%s: need to know host tlb size\n", __func__);
		return -ENODEV;
	}

	host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
				  TLBnCFG_ASSOC_SHIFT;
	host_tlb_params[1].ways = host_tlb_params[1].entries;

	if (!is_power_of_2(host_tlb_params[0].entries) ||
	    !is_power_of_2(host_tlb_params[0].ways) ||
	    host_tlb_params[0].entries < host_tlb_params[0].ways ||
	    host_tlb_params[0].ways == 0) {
		pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
		       __func__, host_tlb_params[0].entries,
		       host_tlb_params[0].ways);
		return -ENODEV;
	}

	host_tlb_params[0].sets =
		host_tlb_params[0].entries / host_tlb_params[0].ways;
	host_tlb_params[1].sets = 1;
	vcpu_e500->h2g_tlb1_rmap = kcalloc(host_tlb_params[1].entries,
					   sizeof(*vcpu_e500->h2g_tlb1_rmap),
					   GFP_KERNEL);
	if (!vcpu_e500->h2g_tlb1_rmap)
		return -EINVAL;

	return 0;
}

void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->h2g_tlb1_rmap);
}