// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

/* #define DEBUG_MMU */
/* #define DEBUG_MMU_PTE */
/* #define DEBUG_MMU_PTE_IP 0xfff14c40 */

#ifdef DEBUG_MMU
#define dprintk(X...) printk(KERN_INFO X)
#else
#define dprintk(X...) do { } while(0)
#endif

#ifdef DEBUG_MMU_PTE
#define dprintk_pte(X...) printk(KERN_INFO X)
#else
#define dprintk_pte(X...) do { } while(0)
#endif

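/*
 * Positions of the R (referenced) and C (changed) bits in the low half
 * of the second word of a hashed PTE.  The guest's memory management
 * reads these back, so they are set whenever a page is used on the
 * guest's behalf (see the end of kvmppc_mmu_book3s_32_xlate_pte()).
 */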
#define PTEG_FLAG_ACCESSED	0x00000100
#define PTEG_FLAG_DIRTY		0x00000080
#ifndef SID_SHIFT
#define SID_SHIFT		28
#endif

static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
{
#ifdef DEBUG_MMU_PTE_IP
	return vcpu->arch.regs.nip == DEBUG_MMU_PTE_IP;
#else
	return true;
#endif
}

static inline u32 sr_vsid(u32 sr_raw)
{
	return sr_raw & 0x0fffffff;
}

static inline bool sr_valid(u32 sr_raw)
{
	return (sr_raw & 0x80000000) ? false : true;
}

static inline bool sr_ks(u32 sr_raw)
{
	return (sr_raw & 0x40000000) ? true: false;
}

static inline bool sr_kp(u32 sr_raw)
{
	return (sr_raw & 0x20000000) ? true: false;
}

static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
					  struct kvmppc_pte *pte, bool data,
					  bool iswrite);
static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
					     u64 *vsid);

static u32 find_sr(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	return kvmppc_get_sr(vcpu, (eaddr >> 28) & 0xf);
}

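/*
 * Turn an effective address into the virtual page tag used by the shadow
 * MMU: a BAT hit supplies the vpage directly, otherwise it is the
 * segment's VSID in the upper bits with the 16-bit page index from the
 * EA below.
 */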
static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
					 bool data)
{
	u64 vsid;
	struct kvmppc_pte pte;

	if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data, false))
		return pte.vpage;

	kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
	return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16);
}

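/*
 * Return the host address of the PTE group for @eaddr.  This follows the
 * classic 32-bit hashed page table: the primary hash is the VSID XORed
 * with the page index, the secondary hash is its complement, and SDR1
 * provides HTABORG (table base) and HTABMASK (how many hash bits
 * participate).  The guest-physical PTEG address is then converted to a
 * host virtual address with gfn_to_hva().
 */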
static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvm_vcpu *vcpu,
				      u32 sre, gva_t eaddr,
				      bool primary)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u32 page, hash, pteg, htabmask;
	hva_t r;

	page = (eaddr & 0x0FFFFFFF) >> 12;
	htabmask = ((vcpu_book3s->sdr1 & 0x1FF) << 16) | 0xFFC0;

	hash = ((sr_vsid(sre) ^ page) << 6);
	if (!primary)
		hash = ~hash;
	hash &= htabmask;

	pteg = (vcpu_book3s->sdr1 & 0xffff0000) | hash;

	dprintk("MMU: pc=0x%lx eaddr=0x%lx sdr1=0x%llx pteg=0x%x vsid=0x%x\n",
		kvmppc_get_pc(vcpu), eaddr, vcpu_book3s->sdr1, pteg,
		sr_vsid(sre));

	r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);
	if (kvm_is_error_hva(r))
		return r;
	return r | (pteg & ~PAGE_MASK);
}

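/*
 * Build the compare value (first word of a PTE) expected for this EA:
 * the valid bit, the VSID, the H bit for the secondary hash and the
 * abbreviated page index taken from the EA.
 */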
static u32 kvmppc_mmu_book3s_32_get_ptem(u32 sre, gva_t eaddr, bool primary)
{
	return ((eaddr & 0x0fffffff) >> 22) | (sr_vsid(sre) << 7) |
	       (primary ? 0 : 0x40) | 0x80000000;
}

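/*
 * Try to translate @eaddr through the guest's I/D BAT registers.  A BAT
 * matches when the EA equals BEPI under the block length mask and the
 * entry is valid for the current privilege level (Vs/Vp).  On a hit the
 * real address is BRPN plus the block offset, with read/write permission
 * taken from the PP bits.
 */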
static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
					  struct kvmppc_pte *pte, bool data,
					  bool iswrite)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	struct kvmppc_bat *bat;
	int i;

	for (i = 0; i < 8; i++) {
		if (data)
			bat = &vcpu_book3s->dbat[i];
		else
			bat = &vcpu_book3s->ibat[i];

		if (kvmppc_get_msr(vcpu) & MSR_PR) {
			if (!bat->vp)
				continue;
		} else {
			if (!bat->vs)
				continue;
		}

		if (check_debug_ip(vcpu))
		{
			dprintk_pte("%cBAT %02d: 0x%lx - 0x%x (0x%x)\n",
				    data ? 'd' : 'i', i, eaddr, bat->bepi,
				    bat->bepi_mask);
		}
		if ((eaddr & bat->bepi_mask) == bat->bepi) {
			u64 vsid;
			kvmppc_mmu_book3s_32_esid_to_vsid(vcpu,
				eaddr >> SID_SHIFT, &vsid);
			vsid <<= 16;
			pte->vpage = (((u64)eaddr >> 12) & 0xffff) | vsid;

			pte->raddr = bat->brpn | (eaddr & ~bat->bepi_mask);
			pte->may_read = bat->pp;
			pte->may_write = bat->pp > 1;
			pte->may_execute = true;
			if (!pte->may_read) {
				printk(KERN_INFO "BAT is not readable!\n");
				continue;
			}
			if (iswrite && !pte->may_write) {
				dprintk_pte("BAT is read-only!\n");
				continue;
			}

			return 0;
		}
	}

	return -ENOENT;
}

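/*
 * Search one PTE group (primary or secondary) of the guest hash table
 * for a PTE matching @eaddr.  On a match the access permissions are
 * derived from the PP bits combined with the segment's Ks/Kp key for the
 * current privilege level, and the guest PTE's R/C bits are updated so
 * the guest sees the page as referenced/changed.
 */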
static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
				     struct kvmppc_pte *pte, bool data,
				     bool iswrite, bool primary)
{
	u32 sre;
	hva_t ptegp;
	u32 pteg[16];
	u32 pte0, pte1;
	u32 ptem = 0;
	int i;
	int found = 0;

	sre = find_sr(vcpu, eaddr);

	dprintk_pte("SR 0x%lx: vsid=0x%x, raw=0x%x\n", eaddr >> 28,
		    sr_vsid(sre), sre);

	pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);

	ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu, sre, eaddr, primary);
	if (kvm_is_error_hva(ptegp)) {
		printk(KERN_INFO "KVM: Invalid PTEG!\n");
		goto no_page_found;
	}

	ptem = kvmppc_mmu_book3s_32_get_ptem(sre, eaddr, primary);

	if(copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) {
		printk_ratelimited(KERN_ERR
			"KVM: Can't copy data from 0x%lx!\n", ptegp);
		goto no_page_found;
	}

	for (i=0; i<16; i+=2) {
		pte0 = be32_to_cpu(pteg[i]);
		pte1 = be32_to_cpu(pteg[i + 1]);
		if (ptem == pte0) {
			u8 pp;

			pte->raddr = (pte1 & ~(0xFFFULL)) | (eaddr & 0xFFF);
			pp = pte1 & 3;

			if ((sr_kp(sre) &&  (kvmppc_get_msr(vcpu) & MSR_PR)) ||
			    (sr_ks(sre) && !(kvmppc_get_msr(vcpu) & MSR_PR)))
				pp |= 4;

			pte->may_write = false;
			pte->may_read = false;
			pte->may_execute = true;
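			/*
			 * pp now holds the PTE's PP bits, with bit 2 set
			 * when the segment key applies to the current
			 * privilege level: values 0-2 and 6 grant
			 * read/write, 3, 5 and 7 are read-only, and 4
			 * (key set, PP=0) grants no access at all.
			 */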
			switch (pp) {
				case 0:
				case 1:
				case 2:
				case 6:
					pte->may_write = true;
					fallthrough;
				case 3:
				case 5:
				case 7:
					pte->may_read = true;
					break;
			}

			dprintk_pte("MMU: Found PTE -> %x %x - %x\n",
				    pte0, pte1, pp);
			found = 1;
			break;
		}
	}

	/* Update PTE C and A bits, so the guest's swapper knows we used the
	   page */
	if (found) {
		u32 pte_r = pte1;
		char __user *addr = (char __user *) (ptegp + (i+1) * sizeof(u32));

		/*
		 * Use single-byte writes to update the HPTE, to
		 * conform to what real hardware does.
		 */
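		/*
		 * The guest PTE is big-endian, so byte 2 of the second word
		 * holds the R bit (0x100) and byte 3 holds the C bit (0x80);
		 * that is why the stores below go to addr + 2 and addr + 3.
		 */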
		if (pte->may_read && !(pte_r & PTEG_FLAG_ACCESSED)) {
			pte_r |= PTEG_FLAG_ACCESSED;
			put_user(pte_r >> 8, addr + 2);
		}
		if (iswrite && pte->may_write && !(pte_r & PTEG_FLAG_DIRTY)) {
			pte_r |= PTEG_FLAG_DIRTY;
			put_user(pte_r, addr + 3);
		}
		if (!pte->may_read || (iswrite && !pte->may_write))
			return -EPERM;
		return 0;
	}

no_page_found:

	if (check_debug_ip(vcpu)) {
		dprintk_pte("KVM MMU: No PTE found (sdr1=0x%llx ptegp=0x%lx)\n",
			    to_book3s(vcpu)->sdr1, ptegp);
		for (i=0; i<16; i+=2) {
			dprintk_pte("   %02d: 0x%x - 0x%x (0x%x)\n",
				    i, be32_to_cpu(pteg[i]),
				    be32_to_cpu(pteg[i+1]), ptem);
		}
	}

	return -ENOENT;
}

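/*
 * Top-level guest translation: try the magic page override first, then
 * the BATs, then the primary hash PTEG and finally the secondary PTEG.
 */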
static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
				      struct kvmppc_pte *pte, bool data,
				      bool iswrite)
{
	int r;
	ulong mp_ea = vcpu->arch.magic_page_ea;

	pte->eaddr = eaddr;
	pte->page_size = MMU_PAGE_4K;

	/* Magic page override */
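	/*
	 * The magic page is KVM's paravirtual shared page, registered by
	 * the guest via hypercall.  Supervisor-mode accesses to its EA skip
	 * the regular MMU and map straight to the backing real page with
	 * full permissions.
	 */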
	if (unlikely(mp_ea) &&
	    unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
		pte->raddr = vcpu->arch.magic_page_pa | (pte->raddr & 0xfff);
		pte->raddr &= KVM_PAM;
		pte->may_execute = true;
		pte->may_read = true;
		pte->may_write = true;

		return 0;
	}

	r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data, iswrite);
	if (r < 0)
		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
						   data, iswrite, true);
	if (r == -ENOENT)
		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte,
						   data, iswrite, false);

	return r;
}


static u32 kvmppc_mmu_book3s_32_mfsrin(struct kvm_vcpu *vcpu, u32 srnum)
{
	return kvmppc_get_sr(vcpu, srnum);
}

static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
					ulong value)
{
	kvmppc_set_sr(vcpu, srnum, value);
	kvmppc_mmu_map_segment(vcpu, srnum << SID_SHIFT);
}

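/*
 * Emulate guest tlbie by dropping any shadow translations for this
 * effective address on every vcpu.
 */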
static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
{
	int i;
	struct kvm_vcpu *v;

	/* flush this VA on all cpus */
	kvm_for_each_vcpu(i, v, vcpu->kvm)
		kvmppc_mmu_pte_flush(v, ea, 0x0FFFF000);
}

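/*
 * Map a guest ESID to the VSID used for shadow translations.  When both
 * MSR_IR and MSR_DR are set the segment register provides the VSID;
 * otherwise the VSID is tagged (VSID_REAL, VSID_REAL_IR/DR, VSID_BAT) so
 * real-mode and translated accesses live in distinct shadow contexts,
 * and VSID_PR is added in problem state.
 */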
static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
					     u64 *vsid)
{
	ulong ea = esid << SID_SHIFT;
	u32 sr;
	u64 gvsid = esid;
	u64 msr = kvmppc_get_msr(vcpu);

	if (msr & (MSR_DR|MSR_IR)) {
		sr = find_sr(vcpu, ea);
		if (sr_valid(sr))
			gvsid = sr_vsid(sr);
	}

	/* In case we only have one of MSR_IR or MSR_DR set, let's put
	   that in the real-mode context (and hope RM doesn't access
	   high memory) */
	switch (msr & (MSR_DR|MSR_IR)) {
	case 0:
		*vsid = VSID_REAL | esid;
		break;
	case MSR_IR:
		*vsid = VSID_REAL_IR | gvsid;
		break;
	case MSR_DR:
		*vsid = VSID_REAL_DR | gvsid;
		break;
	case MSR_DR|MSR_IR:
		if (sr_valid(sr))
			*vsid = sr_vsid(sr);
		else
			*vsid = VSID_BAT | gvsid;
		break;
	default:
		BUG();
	}

	if (msr & MSR_PR)
		*vsid |= VSID_PR;

	return 0;
}

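/*
 * 32-bit guests expect dcbz to clear a 32-byte cache line, so always
 * report that here; the core emulation uses this to compensate when the
 * host cache line size differs.
 */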
static bool kvmppc_mmu_book3s_32_is_dcbz32(struct kvm_vcpu *vcpu)
{
	return true;
}


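/*
 * Wire up the 32-bit Book3S MMU callbacks.  The SLB ops are left NULL
 * because a 32-bit guest uses segment registers, not an SLB.
 */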
void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	mmu->mtsrin = kvmppc_mmu_book3s_32_mtsrin;
	mmu->mfsrin = kvmppc_mmu_book3s_32_mfsrin;
	mmu->xlate = kvmppc_mmu_book3s_32_xlate;
	mmu->tlbie = kvmppc_mmu_book3s_32_tlbie;
	mmu->esid_to_vsid = kvmppc_mmu_book3s_32_esid_to_vsid;
	mmu->ea_to_vp = kvmppc_mmu_book3s_32_ea_to_vp;
	mmu->is_dcbz32 = kvmppc_mmu_book3s_32_is_dcbz32;

	mmu->slbmte = NULL;
	mmu->slbmfee = NULL;
	mmu->slbmfev = NULL;
	mmu->slbfee = NULL;
	mmu->slbie = NULL;
	mmu->slbia = NULL;
}