1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0-only */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Author: Yu Liu <yu.liu@freescale.com>
6*4882a593Smuzhiyun * Scott Wood <scottwood@freescale.com>
7*4882a593Smuzhiyun * Ashish Kalra <ashish.kalra@freescale.com>
8*4882a593Smuzhiyun * Varun Sethi <varun.sethi@freescale.com>
9*4882a593Smuzhiyun *
10*4882a593Smuzhiyun * Description:
11*4882a593Smuzhiyun * This file is based on arch/powerpc/kvm/44x_tlb.h and
12*4882a593Smuzhiyun * arch/powerpc/include/asm/kvm_44x.h by Hollis Blanchard <hollisb@us.ibm.com>,
13*4882a593Smuzhiyun * Copyright IBM Corp. 2007-2008
14*4882a593Smuzhiyun */
15*4882a593Smuzhiyun
16*4882a593Smuzhiyun #ifndef KVM_E500_H
17*4882a593Smuzhiyun #define KVM_E500_H
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun #include <linux/kvm_host.h>
20*4882a593Smuzhiyun #include <asm/nohash/mmu-book3e.h>
21*4882a593Smuzhiyun #include <asm/tlb.h>
22*4882a593Smuzhiyun #include <asm/cputhreads.h>
23*4882a593Smuzhiyun
/* Optional vcpu features queried via has_feature() below. */
enum vcpu_ftr {
	VCPU_FTR_MMU_V2		/* MMU architecture version 2.0 (MAVN == V2) */
};
27*4882a593Smuzhiyun
#define E500_PID_NUM   3	/* number of guest PID registers tracked */
#define E500_TLB_NUM   2	/* guest TLB arrays: TLB0 and TLB1 */

/* entry is mapped somewhere in host TLB */
#define E500_TLB_VALID (1 << 31)
/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
#define E500_TLB_BITMAP (1 << 30)
/* TLB1 entry is mapped by host TLB0 */
#define E500_TLB_TLB0 (1 << 29)
/* bits [6-5] MAS2_X1 and MAS2_X0 and [4-0] bits for WIMGE */
#define E500_TLB_MAS2_ATTR (0x7f)
39*4882a593Smuzhiyun
/* Host-side state for one shadowed guest TLB entry. */
struct tlbe_ref {
	kvm_pfn_t pfn;		/* valid only for TLB0, except briefly */
	unsigned int flags;	/* E500_TLB_* */
};
44*4882a593Smuzhiyun
/* Per-guest-TLB-entry private data kept by KVM (see gtlb_priv below). */
struct tlbe_priv {
	struct tlbe_ref ref;
};
48*4882a593Smuzhiyun
#ifdef CONFIG_KVM_E500V2
struct vcpu_id_table;	/* opaque; referenced only through the idt pointer */
#endif

/* Geometry of one guest TLB array. */
struct kvmppc_e500_tlb_params {
	int entries, ways, sets;
};
56*4882a593Smuzhiyun
/* e500-specific vcpu state; obtain from a struct kvm_vcpu via to_e500(). */
struct kvmppc_vcpu_e500 {
	struct kvm_vcpu vcpu;

	/* Unmodified copy of the guest's TLB -- shared with host userspace. */
	struct kvm_book3e_206_tlb_entry *gtlb_arch;

	/* Starting entry number in gtlb_arch[] */
	int gtlb_offset[E500_TLB_NUM];

	/* KVM internal information associated with each guest TLB entry */
	struct tlbe_priv *gtlb_priv[E500_TLB_NUM];

	/* Geometry (entries/ways/sets) of each guest TLB array */
	struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM];

	/* Presumably the MAS0[NV] next-victim state per guest TLB
	 * (cf. get_tlb_nv_bit()) -- TODO(review): confirm against tlb code. */
	unsigned int gtlb_nv[E500_TLB_NUM];

	/* NOTE(review): looks like a next-victim cursor for host TLB1 --
	 * confirm in e500_mmu_host code. */
	unsigned int host_tlb1_nv;

	/* Guest-visible copies of e500 SPRs */
	u32 svr;
	u32 l1csr0;
	u32 l1csr1;
	u32 hid0;
	u32 hid1;
	u64 mcar;

	/* Pages backing the userspace-shared gtlb_arch area */
	struct page **shared_tlb_pages;
	int num_shared_tlb_pages;

	/* Guest TLB1 <-> host TLB1 mapping state (see E500_TLB_BITMAP) */
	u64 *g2h_tlb1_map;
	unsigned int *h2g_tlb1_rmap;

	/* Minimum and maximum address mapped by TLB1 */
	unsigned long tlb1_min_eaddr;
	unsigned long tlb1_max_eaddr;

#ifdef CONFIG_KVM_E500V2
	/* Guest PID0..PID2 register values */
	u32 pid[E500_PID_NUM];

	/* vcpu id table */
	struct vcpu_id_table *idt;
#endif
};
99*4882a593Smuzhiyun
/* Convert a generic vcpu pointer to its enclosing e500 vcpu structure. */
static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu);
}
104*4882a593Smuzhiyun
105*4882a593Smuzhiyun
/* This geometry is the legacy default -- can be overridden by userspace */
#define KVM_E500_TLB0_WAY_SIZE		128
#define KVM_E500_TLB0_WAY_NUM		2

#define KVM_E500_TLB0_SIZE  (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM)
#define KVM_E500_TLB1_SIZE  16

/* Pack/unpack a (tlbsel, esel) pair into one int index */
#define index_of(tlbsel, esel)	(((tlbsel) << 16) | ((esel) & 0xFFFF))
#define tlbsel_of(index)	((index) >> 16)
#define esel_of(index)		((index) & 0xFFFF)

#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW)
#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW)
/* MAS2 attribute bits the guest may control */
#define MAS2_ATTRIB_MASK \
	  (MAS2_X0 | MAS2_X1 | MAS2_E | MAS2_G)
/* MAS3 user/permission bits the guest may control */
#define MAS3_ATTRIB_MASK \
	  (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
	   | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)
124*4882a593Smuzhiyun
/* Guest TLB instruction emulation entry points (implemented elsewhere). */
int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500,
				ulong value);
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu);
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu);
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea);
int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea);
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea);
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500);
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500);

/* Save/restore of the guest MMU state via the sregs / ONE_REG ABIs. */
void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
				union kvmppc_one_reg *val);
int kvmppc_set_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
				union kvmppc_one_reg *val);

#ifdef CONFIG_KVM_E500V2
unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
				 unsigned int as, unsigned int gid,
				 unsigned int pr, int avoid_recursion);
#endif
148*4882a593Smuzhiyun
149*4882a593Smuzhiyun /* TLB helper functions */
150*4882a593Smuzhiyun static inline unsigned int
get_tlb_size(const struct kvm_book3e_206_tlb_entry * tlbe)151*4882a593Smuzhiyun get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe)
152*4882a593Smuzhiyun {
153*4882a593Smuzhiyun return (tlbe->mas1 >> 7) & 0x1f;
154*4882a593Smuzhiyun }
155*4882a593Smuzhiyun
get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry * tlbe)156*4882a593Smuzhiyun static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe)
157*4882a593Smuzhiyun {
158*4882a593Smuzhiyun return tlbe->mas2 & MAS2_EPN;
159*4882a593Smuzhiyun }
160*4882a593Smuzhiyun
get_tlb_bytes(const struct kvm_book3e_206_tlb_entry * tlbe)161*4882a593Smuzhiyun static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe)
162*4882a593Smuzhiyun {
163*4882a593Smuzhiyun unsigned int pgsize = get_tlb_size(tlbe);
164*4882a593Smuzhiyun return 1ULL << 10 << pgsize;
165*4882a593Smuzhiyun }
166*4882a593Smuzhiyun
get_tlb_end(const struct kvm_book3e_206_tlb_entry * tlbe)167*4882a593Smuzhiyun static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe)
168*4882a593Smuzhiyun {
169*4882a593Smuzhiyun u64 bytes = get_tlb_bytes(tlbe);
170*4882a593Smuzhiyun return get_tlb_eaddr(tlbe) + bytes - 1;
171*4882a593Smuzhiyun }
172*4882a593Smuzhiyun
get_tlb_raddr(const struct kvm_book3e_206_tlb_entry * tlbe)173*4882a593Smuzhiyun static inline u64 get_tlb_raddr(const struct kvm_book3e_206_tlb_entry *tlbe)
174*4882a593Smuzhiyun {
175*4882a593Smuzhiyun return tlbe->mas7_3 & ~0xfffULL;
176*4882a593Smuzhiyun }
177*4882a593Smuzhiyun
178*4882a593Smuzhiyun static inline unsigned int
get_tlb_tid(const struct kvm_book3e_206_tlb_entry * tlbe)179*4882a593Smuzhiyun get_tlb_tid(const struct kvm_book3e_206_tlb_entry *tlbe)
180*4882a593Smuzhiyun {
181*4882a593Smuzhiyun return (tlbe->mas1 >> 16) & 0xff;
182*4882a593Smuzhiyun }
183*4882a593Smuzhiyun
184*4882a593Smuzhiyun static inline unsigned int
get_tlb_ts(const struct kvm_book3e_206_tlb_entry * tlbe)185*4882a593Smuzhiyun get_tlb_ts(const struct kvm_book3e_206_tlb_entry *tlbe)
186*4882a593Smuzhiyun {
187*4882a593Smuzhiyun return (tlbe->mas1 >> 12) & 0x1;
188*4882a593Smuzhiyun }
189*4882a593Smuzhiyun
190*4882a593Smuzhiyun static inline unsigned int
get_tlb_v(const struct kvm_book3e_206_tlb_entry * tlbe)191*4882a593Smuzhiyun get_tlb_v(const struct kvm_book3e_206_tlb_entry *tlbe)
192*4882a593Smuzhiyun {
193*4882a593Smuzhiyun return (tlbe->mas1 >> 31) & 0x1;
194*4882a593Smuzhiyun }
195*4882a593Smuzhiyun
196*4882a593Smuzhiyun static inline unsigned int
get_tlb_iprot(const struct kvm_book3e_206_tlb_entry * tlbe)197*4882a593Smuzhiyun get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe)
198*4882a593Smuzhiyun {
199*4882a593Smuzhiyun return (tlbe->mas1 >> 30) & 0x1;
200*4882a593Smuzhiyun }
201*4882a593Smuzhiyun
202*4882a593Smuzhiyun static inline unsigned int
get_tlb_tsize(const struct kvm_book3e_206_tlb_entry * tlbe)203*4882a593Smuzhiyun get_tlb_tsize(const struct kvm_book3e_206_tlb_entry *tlbe)
204*4882a593Smuzhiyun {
205*4882a593Smuzhiyun return (tlbe->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
206*4882a593Smuzhiyun }
207*4882a593Smuzhiyun
/* Current guest PID (low 8 bits of the shadowed PID register). */
static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pid & 0xff;
}
212*4882a593Smuzhiyun
get_cur_as(struct kvm_vcpu * vcpu)213*4882a593Smuzhiyun static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu)
214*4882a593Smuzhiyun {
215*4882a593Smuzhiyun return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS));
216*4882a593Smuzhiyun }
217*4882a593Smuzhiyun
get_cur_pr(struct kvm_vcpu * vcpu)218*4882a593Smuzhiyun static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
219*4882a593Smuzhiyun {
220*4882a593Smuzhiyun return !!(vcpu->arch.shared->msr & MSR_PR);
221*4882a593Smuzhiyun }
222*4882a593Smuzhiyun
get_cur_spid(const struct kvm_vcpu * vcpu)223*4882a593Smuzhiyun static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu)
224*4882a593Smuzhiyun {
225*4882a593Smuzhiyun return (vcpu->arch.shared->mas6 >> 16) & 0xff;
226*4882a593Smuzhiyun }
227*4882a593Smuzhiyun
/* Search address space (SAS): MAS6 bit 0. */
static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shared->mas6 & 0x1;
}
232*4882a593Smuzhiyun
get_tlb_tlbsel(const struct kvm_vcpu * vcpu)233*4882a593Smuzhiyun static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
234*4882a593Smuzhiyun {
235*4882a593Smuzhiyun /*
236*4882a593Smuzhiyun * Manual says that tlbsel has 2 bits wide.
237*4882a593Smuzhiyun * Since we only have two TLBs, only lower bit is used.
238*4882a593Smuzhiyun */
239*4882a593Smuzhiyun return (vcpu->arch.shared->mas0 >> 28) & 0x1;
240*4882a593Smuzhiyun }
241*4882a593Smuzhiyun
/* Next-victim (NV) field: low 12 bits of MAS0. */
static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shared->mas0 & 0xfff;
}
246*4882a593Smuzhiyun
get_tlb_esel_bit(const struct kvm_vcpu * vcpu)247*4882a593Smuzhiyun static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu)
248*4882a593Smuzhiyun {
249*4882a593Smuzhiyun return (vcpu->arch.shared->mas0 >> 16) & 0xfff;
250*4882a593Smuzhiyun }
251*4882a593Smuzhiyun
/*
 * Decide whether a guest TLB entry is safe to shadow in the host TLB:
 * it must be valid, match the current guest address space (checked only
 * without CONFIG_KVM_BOOKE_HV), and its physical address must fall in a
 * memslot (i.e. map guest RAM).  Returns 1 if safe, 0 otherwise.
 */
static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
				    const struct kvm_book3e_206_tlb_entry *tlbe)
{
	gpa_t gpa;

	if (!get_tlb_v(tlbe))
		return 0;

#ifndef CONFIG_KVM_BOOKE_HV
	/* Does it match current guest AS? */
	/* XXX what about IS != DS? */
	if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
		return 0;
#endif

	gpa = get_tlb_raddr(tlbe);
	if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
		/* Mapping is not for RAM. */
		return 0;

	return 1;
}
274*4882a593Smuzhiyun
get_entry(struct kvmppc_vcpu_e500 * vcpu_e500,int tlbsel,int entry)275*4882a593Smuzhiyun static inline struct kvm_book3e_206_tlb_entry *get_entry(
276*4882a593Smuzhiyun struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry)
277*4882a593Smuzhiyun {
278*4882a593Smuzhiyun int offset = vcpu_e500->gtlb_offset[tlbsel];
279*4882a593Smuzhiyun return &vcpu_e500->gtlb_arch[offset + entry];
280*4882a593Smuzhiyun }
281*4882a593Smuzhiyun
/* Invalidate one / all shadowed guest TLB entries in the host TLB. */
void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
			   struct kvm_book3e_206_tlb_entry *gtlbe);
void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500);

#ifdef CONFIG_KVM_BOOKE_HV
/* With HV support, the guest's own TID/PID/TS values are used directly. */
#define kvmppc_e500_get_tlb_stid(vcpu, gtlbe)       get_tlb_tid(gtlbe)
#define get_tlbmiss_tid(vcpu)           get_cur_pid(vcpu)
#define get_tlb_sts(gtlbe)              (gtlbe->mas1 & MAS1_TS)
290*4882a593Smuzhiyun
291*4882a593Smuzhiyun /*
292*4882a593Smuzhiyun * These functions should be called with preemption disabled
293*4882a593Smuzhiyun * and the returned value is valid only in that context
294*4882a593Smuzhiyun */
get_thread_specific_lpid(int vm_lpid)295*4882a593Smuzhiyun static inline int get_thread_specific_lpid(int vm_lpid)
296*4882a593Smuzhiyun {
297*4882a593Smuzhiyun int vcpu_lpid = vm_lpid;
298*4882a593Smuzhiyun
299*4882a593Smuzhiyun if (threads_per_core == 2)
300*4882a593Smuzhiyun vcpu_lpid |= smp_processor_id() & 1;
301*4882a593Smuzhiyun
302*4882a593Smuzhiyun return vcpu_lpid;
303*4882a593Smuzhiyun }
304*4882a593Smuzhiyun
/* LPID to use for this vcpu on the current thread (preemption disabled). */
static inline int get_lpid(struct kvm_vcpu *vcpu)
{
	return get_thread_specific_lpid(vcpu->kvm->arch.lpid);
}
309*4882a593Smuzhiyun #else
310*4882a593Smuzhiyun unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
311*4882a593Smuzhiyun struct kvm_book3e_206_tlb_entry *gtlbe);
312*4882a593Smuzhiyun
get_tlbmiss_tid(struct kvm_vcpu * vcpu)313*4882a593Smuzhiyun static inline unsigned int get_tlbmiss_tid(struct kvm_vcpu *vcpu)
314*4882a593Smuzhiyun {
315*4882a593Smuzhiyun struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
316*4882a593Smuzhiyun unsigned int tidseld = (vcpu->arch.shared->mas4 >> 16) & 0xf;
317*4882a593Smuzhiyun
318*4882a593Smuzhiyun return vcpu_e500->pid[tidseld];
319*4882a593Smuzhiyun }
320*4882a593Smuzhiyun
321*4882a593Smuzhiyun /* Force TS=1 for all guest mappings. */
322*4882a593Smuzhiyun #define get_tlb_sts(gtlbe) (MAS1_TS)
323*4882a593Smuzhiyun #endif /* !BOOKE_HV */
324*4882a593Smuzhiyun
has_feature(const struct kvm_vcpu * vcpu,enum vcpu_ftr ftr)325*4882a593Smuzhiyun static inline bool has_feature(const struct kvm_vcpu *vcpu,
326*4882a593Smuzhiyun enum vcpu_ftr ftr)
327*4882a593Smuzhiyun {
328*4882a593Smuzhiyun bool has_ftr;
329*4882a593Smuzhiyun switch (ftr) {
330*4882a593Smuzhiyun case VCPU_FTR_MMU_V2:
331*4882a593Smuzhiyun has_ftr = ((vcpu->arch.mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2);
332*4882a593Smuzhiyun break;
333*4882a593Smuzhiyun default:
334*4882a593Smuzhiyun return false;
335*4882a593Smuzhiyun }
336*4882a593Smuzhiyun return has_ftr;
337*4882a593Smuzhiyun }
338*4882a593Smuzhiyun
339*4882a593Smuzhiyun #endif /* KVM_E500_H */
340