// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 */

#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#include "trace_pr.h"
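/* log2 of the 4k page size: the low 12 bits of an address are the page offset */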
#define PTE_SIZE	12

static struct kmem_cache *hpte_cache;

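/*
 * Hash helpers: pick a bucket in each of the per-vcpu lookup tables.
 * Every shadow PTE is linked into several hash lists at once, keyed on
 * (parts of) its effective address and its virtual page number, so an
 * invalidation can find it from whichever key it has at hand.
 */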
static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
	return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
}

static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)
{
	return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,
		       HPTEG_HASH_BITS_PTE_LONG);
}

static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
	return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}

static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
	return hash_64((vpage & 0xffffff000ULL) >> 12,
		       HPTEG_HASH_BITS_VPTE_LONG);
}

#ifdef CONFIG_PPC_BOOK3S_64
static inline u64 kvmppc_mmu_hash_vpte_64k(u64 vpage)
{
	return hash_64((vpage & 0xffffffff0ULL) >> 4,
		       HPTEG_HASH_BITS_VPTE_64K);
}
#endif

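/*
 * Link a freshly mapped shadow PTE into all lookup hash lists.  Writers
 * serialize on mmu_lock; readers traverse the lists under RCU only.
 */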
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	u64 index;
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_map(pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* Add to ePTE list */
	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[index]);

	/* Add to ePTE_long list */
	index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte_long,
			   &vcpu3s->hpte_hash_pte_long[index]);

	/* Add to vPTE list */
	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[index]);

	/* Add to vPTE_long list */
	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_long,
			   &vcpu3s->hpte_hash_vpte_long[index]);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Add to vPTE_64k list */
	index = kvmppc_mmu_hash_vpte_64k(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_64k,
			   &vcpu3s->hpte_hash_vpte_64k[index]);
#endif

	vcpu3s->hpte_cache_count++;

	spin_unlock(&vcpu3s->mmu_lock);
}

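/* RCU callback: free the entry once all lockless list walkers are done */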
static void free_pte_rcu(struct rcu_head *head)
{
	struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head);
	kmem_cache_free(hpte_cache, pte);
}

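/*
 * Drop one shadow PTE: tear down the host mapping first, then unlink the
 * entry from every hash list under mmu_lock.  A concurrent invalidation
 * may have won the race, which the hlist_unhashed() check catches.  The
 * entry itself is freed only after an RCU grace period, so lockless
 * readers never see freed memory.
 */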
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_invalidate(pte);

	/* Different for 32 and 64 bit */
	kvmppc_mmu_invalidate_pte(vcpu, pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* pte already invalidated in between? */
	if (hlist_unhashed(&pte->list_pte)) {
		spin_unlock(&vcpu3s->mmu_lock);
		return;
	}

	hlist_del_init_rcu(&pte->list_pte);
	hlist_del_init_rcu(&pte->list_pte_long);
	hlist_del_init_rcu(&pte->list_vpte);
	hlist_del_init_rcu(&pte->list_vpte_long);
#ifdef CONFIG_PPC_BOOK3S_64
	hlist_del_init_rcu(&pte->list_vpte_64k);
#endif
	vcpu3s->hpte_cache_count--;

	spin_unlock(&vcpu3s->mmu_lock);

	call_rcu(&pte->rcu_head, free_pte_rcu);
}

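/*
 * Invalidate every cached shadow PTE.  Walking the vPTE_long table alone
 * is sufficient because each entry is linked into all of the tables.
 */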
static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;
	int i;

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
			invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

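/* Invalidate all shadow PTEs that map one guest effective-address page */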
static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_pte)
		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu3s->hpte_hash_pte_long[
			kvmppc_mmu_hash_pte_long(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_pte_long)
		if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

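/*
 * Flush shadow PTEs by effective address; ea_mask selects the scope.
 * For example, kvmppc_mmu_pte_flush(vcpu, ea, ~0xfffUL) invalidates just
 * the page containing ea, while a mask of 0 drops the entire cache.
 */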
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
	trace_kvm_book3s_mmu_flush("", vcpu, guest_ea, ea_mask);
	guest_ea &= ea_mask;

	switch (ea_mask) {
	case ~0xfffUL:
		kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
		break;
	case 0x0ffff000:
		kvmppc_mmu_pte_flush_long(vcpu, guest_ea);
		break;
	case 0:
		/* Doing a complete flush -> start from scratch */
		kvmppc_mmu_pte_flush_all(vcpu);
		break;
	default:
		WARN_ON(1);
		break;
	}
}

/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xfffffffffULL;

	list = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

#ifdef CONFIG_PPC_BOOK3S_64
/* Flush with mask 0xffffffff0 */
static void kvmppc_mmu_pte_vflush_64k(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffffff0ULL;

	list = &vcpu3s->hpte_hash_vpte_64k[
		kvmppc_mmu_hash_vpte_64k(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte_64k)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}
#endif

/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffff000ULL;

	list = &vcpu3s->hpte_hash_vpte_long[
		kvmppc_mmu_hash_vpte_long(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte_long)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

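/*
 * Flush shadow PTEs by virtual page number; as above, vp_mask selects
 * the granularity, from a single vpage (0xfffffffffULL) up to coarser
 * ranges (the 64k-page case exists only on Book3S-64).
 */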
void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	trace_kvm_book3s_mmu_flush("v", vcpu, guest_vp, vp_mask);
	guest_vp &= vp_mask;

	switch (vp_mask) {
	case 0xfffffffffULL:
		kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case 0xffffffff0ULL:
		kvmppc_mmu_pte_vflush_64k(vcpu, guest_vp);
		break;
#endif
	case 0xffffff000ULL:
		kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
		break;
	default:
		WARN_ON(1);
		return;
	}
}

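/*
 * Flush all shadow PTEs whose guest real address falls in
 * [pa_start, pa_end).  No table is hashed by real address, so this
 * walks every vPTE_long bucket, like a full flush.
 */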
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;
	int i;

	trace_kvm_book3s_mmu_flush("p", vcpu, pa_start, pa_end);

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
			if ((pte->pte.raddr >= pa_start) &&
			    (pte->pte.raddr < pa_end))
				invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

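/*
 * Allocate a cache entry for the next mapping.  If the cache has hit
 * its capacity limit, flush everything first to make room.
 */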
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;

	if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush_all(vcpu);

	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);

	return pte;
}

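/*
 * Free an entry directly, with no RCU grace period; only safe for
 * entries that were never linked into the hash lists.
 */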
void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte)
{
	kmem_cache_free(hpte_cache, pte);
}

void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
}

static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
{
	int i;

	for (i = 0; i < len; i++)
		INIT_HLIST_HEAD(&hash_list[i]);
}

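/* Per-vcpu setup: empty all hash buckets and initialize the lock */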
int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	/* init hpte lookup hashes */
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte,
				  ARRAY_SIZE(vcpu3s->hpte_hash_pte));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte_long,
				  ARRAY_SIZE(vcpu3s->hpte_hash_pte_long));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));
#ifdef CONFIG_PPC_BOOK3S_64
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_64k,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_64k));
#endif

	spin_lock_init(&vcpu3s->mmu_lock);

	return 0;
}

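/* Create the global slab cache backing all hpte_cache allocations */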
int kvmppc_mmu_hpte_sysinit(void)
{
	/* init hpte slab cache */
	hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache),
				       sizeof(struct hpte_cache), 0, NULL);

	return 0;
}

void kvmppc_mmu_hpte_sysexit(void)
{
	kmem_cache_destroy(hpte_cache);
}
387