// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/32/mmu-hash.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "book3s.h"

/* #define DEBUG_MMU */
/* #define DEBUG_SR */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while (0)
#endif

#ifdef DEBUG_SR
#define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_sr(a, ...) do { } while (0)
#endif

#if PAGE_SHIFT != 12
#error Unknown page size
#endif

#ifdef CONFIG_SMP
#error XXX need to grab mmu_hash_lock
#endif

#ifdef CONFIG_PTE_64BIT
#error Only 32 bit pages are supported for now
#endif

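/*
 * Host hash table location and mask, read from SDR1 once at init time
 * (see kvmppc_mmu_init_pr() below).
 */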
static ulong htab;
static u32 htabmask;

void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	volatile u32 *pteg;

	/* Remove from host HTAB */
	pteg = (u32 *)pte->slot;
	pteg[0] = 0;

	/* And make sure it's gone from the TLB too */
	asm volatile ("sync");
	asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
	asm volatile ("sync");
	asm volatile ("tlbsync");
}

/*
 * We keep 512 gvsid->hvsid entries, mapping the guest ones to the array
 * using a hash, so we don't waste cycles on looping.
 */
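/*
 * The hash XOR-folds the 64-bit gvsid in SID_MAP_BITS-wide chunks, so
 * every bit of the guest VSID influences the resulting array index.
 */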
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}

static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
			   gvsid, map->host_vsid);
		return map;
	}

	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
			   gvsid, map->host_vsid);
		return map;
	}

	dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid);
	return NULL;
}

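/*
 * Compute the host PTEG address for a (vsid, eaddr) pair, following the
 * classic 32-bit hashed page table scheme: the primary hash is
 * vsid XOR page-index, the secondary hash is its one's complement.
 */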
static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
				bool primary)
{
	u32 page, hash;
	ulong pteg = htab;

	page = (eaddr & ~ESID_MASK) >> 12;

	hash = ((vsid ^ page) << 6);
	if (!primary)
		hash = ~hash;

	hash &= htabmask;

	pteg |= hash;

	dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n",
		    htab, hash, htabmask, pteg);

	return (u32 *)pteg;
}

extern char etext[];

int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
			bool iswrite)
{
	kvm_pfn_t hpaddr;
	u64 vpn;
	u64 vsid;
	struct kvmppc_sid_map *map;
	volatile u32 *pteg;
	u32 eaddr = orig_pte->eaddr;
	u32 pteg0, pteg1;
	register int rr = 0;
	bool primary = false;
	bool evict = false;
	struct hpte_cache *pte;
	int r = 0;
	bool writable;

	/* Get host physical address for gpa */
	hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
	if (is_error_noslot_pfn(hpaddr)) {
		printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
		       orig_pte->raddr);
		r = -EINVAL;
		goto out;
	}
	hpaddr <<= PAGE_SHIFT;

	/* and write the mapping ea -> hpa into the pt */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		kvmppc_mmu_map_segment(vcpu, eaddr);
		map = find_sid_vsid(vcpu, vsid);
	}
	BUG_ON(!map);

	vsid = map->host_vsid;
	vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) |
		((eaddr & ~ESID_MASK) >> VPN_SHIFT);
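	/*
	 * Scan the eight PTEs of the current PTEG for a free slot; once a
	 * whole PTEG has been scanned without success, flip to the other
	 * (primary/secondary) PTEG and start evicting from slot 0.
	 */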
next_pteg:
	if (rr == 16) {
		primary = !primary;
		evict = true;
		rr = 0;
	}

	pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);

	/* not evicting yet */
	if (!evict && (pteg[rr] & PTE_V)) {
		rr += 2;
		goto next_pteg;
	}

	dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

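	/*
	 * Build the two HPTE words: word 0 carries the valid bit, the host
	 * VSID, the secondary-hash bit and the abbreviated page index from
	 * the effective address; word 1 carries the host real page number
	 * plus the R/C/M and PP protection bits.
	 */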
	pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
		(primary ? 0 : PTE_SEC);
	pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;

	if (orig_pte->may_write && writable) {
		pteg1 |= PP_RWRW;
		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
	} else {
		pteg1 |= PP_RWRX;
	}

	if (orig_pte->may_execute)
		kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);

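	/*
	 * Update the PTE with interrupts off, and in an order the hardware
	 * can tolerate: invalidate word 0 first, sync, then write word 1
	 * before word 0, so no half-written PTE is ever marked valid.
	 */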
	local_irq_disable();

	if (pteg[rr]) {
		pteg[rr] = 0;
		asm volatile ("sync");
	}
	pteg[rr + 1] = pteg1;
	pteg[rr] = pteg0;
	asm volatile ("sync");

	local_irq_enable();

	dprintk_mmu("KVM: new PTEG: %p\n", pteg);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

	/* Now tell our Shadow PTE code about the new page */

	pte = kvmppc_mmu_hpte_cache_next(vcpu);
	if (!pte) {
		kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
		r = -EAGAIN;
		goto out;
	}

	dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
		    orig_pte->may_write ? 'w' : '-',
		    orig_pte->may_execute ? 'x' : '-',
		    orig_pte->eaddr, (ulong)pteg, vpn,
		    orig_pte->vpage, hpaddr);

	pte->slot = (ulong)&pteg[rr];
	pte->host_vpn = vpn;
	pte->pte = *orig_pte;
	pte->pfn = hpaddr >> PAGE_SHIFT;

	kvmppc_mmu_hpte_cache_map(vcpu, pte);

	kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
out:
	return r;
}

void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
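	/* Flush every shadow PTE that maps this guest page from the HTAB. */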
	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL);
}

static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map = 0;

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	/*
	 * Different guest VSIDs can collide on the same hash slot, so
	 * alternate between the forward and the mirrored map position.
	 */

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &to_book3s(vcpu)->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->vsid_next >= VSID_POOL_SIZE) {
		vcpu_book3s->vsid_next = 0;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}
	map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next];
	vcpu_book3s->vsid_next++;

	map->guest_vsid = gvsid;
	map->valid = true;

	return map;
}

int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	u32 esid = eaddr >> SID_SHIFT;
	u64 gvsid;
	u32 sr;
	struct kvmppc_sid_map *map;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	int r = 0;

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		svcpu->sr[esid] = SR_INVALID;
		r = -ENOENT;
		goto out;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

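	/*
	 * Build the shadow segment register: the host VSID plus the Kp bit,
	 * so the PP bits in the shadow HTAB are interpreted with the
	 * user-state protection key.
	 */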
	map->guest_esid = esid;
	sr = map->host_vsid | SR_KP;
	svcpu->sr[esid] = sr;

	dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);

out:
	svcpu_put(svcpu);
	return r;
}

void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

	dprintk_sr("MMU: flushing all segments (%zu)\n", ARRAY_SIZE(svcpu->sr));
	for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
		svcpu->sr[i] = SR_INVALID;

	svcpu_put(svcpu);
}

void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
	int i;

	kvmppc_mmu_hpte_destroy(vcpu);
	preempt_disable();
	for (i = 0; i < SID_CONTEXTS; i++)
		__destroy_context(to_book3s(vcpu)->context_id[i]);
	preempt_enable();
}

/* From mm/mmu_context_hash32.c */
#define CTX_TO_VSID(c, id)	((((c) * (897 * 16)) + ((id) * 0x111)) & 0xffffff)
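/* e.g. CTX_TO_VSID(1, 0) == (897 * 16) & 0xffffff == 0x3810 */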

int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;
	ulong sdr1;
	int i;
	int j;

	for (i = 0; i < SID_CONTEXTS; i++) {
		err = __init_new_context();
		if (err < 0)
			goto init_fail;
		vcpu3s->context_id[i] = err;

		/* Remember context id for this combination */
		for (j = 0; j < 16; j++)
			vcpu3s->vsid_pool[(i * 16) + j] = CTX_TO_VSID(err, j);
	}

	vcpu3s->vsid_next = 0;

	/* Remember where the HTAB is */
	asm ( "mfsdr1 %0" : "=r"(sdr1) );
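	/*
	 * SDR1 on 32-bit hash MMUs holds HTABORG (the physical HTAB base)
	 * in its upper 16 bits and HTABMASK in its low 9 bits; derive the
	 * virtual base and the PTEG offset mask from those fields.
	 */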
	htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
	htab = (ulong)__va(sdr1 & 0xffff0000);

	kvmppc_mmu_hpte_init(vcpu);

	return 0;

init_fail:
	for (j = 0; j < i; j++) {
		if (!vcpu3s->context_id[j])
			continue;

		__destroy_context(to_book3s(vcpu)->context_id[j]);
	}

	return -1;
}