xref: /OK3568_Linux_fs/kernel/arch/powerpc/kvm/e500_emulate.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Author: Yu Liu, <yu.liu@freescale.com>
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Description:
8*4882a593Smuzhiyun  * This file is derived from arch/powerpc/kvm/44x_emulate.c,
9*4882a593Smuzhiyun  * by Hollis Blanchard <hollisb@us.ibm.com>.
10*4882a593Smuzhiyun  */
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include <asm/kvm_ppc.h>
13*4882a593Smuzhiyun #include <asm/disassemble.h>
14*4882a593Smuzhiyun #include <asm/dbell.h>
15*4882a593Smuzhiyun #include <asm/reg_booke.h>
16*4882a593Smuzhiyun 
17*4882a593Smuzhiyun #include "booke.h"
18*4882a593Smuzhiyun #include "e500.h"
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun #define XOP_DCBTLS  166
21*4882a593Smuzhiyun #define XOP_MSGSND  206
22*4882a593Smuzhiyun #define XOP_MSGCLR  238
23*4882a593Smuzhiyun #define XOP_MFTMR   366
24*4882a593Smuzhiyun #define XOP_TLBIVAX 786
25*4882a593Smuzhiyun #define XOP_TLBSX   914
26*4882a593Smuzhiyun #define XOP_TLBRE   946
27*4882a593Smuzhiyun #define XOP_TLBWE   978
28*4882a593Smuzhiyun #define XOP_TLBILX  18
29*4882a593Smuzhiyun #define XOP_EHPRIV  270
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun #ifdef CONFIG_KVM_E500MC
/*
 * Map the doorbell message type encoded in a msgsnd/msgclr parameter
 * register to the corresponding booke interrupt priority.
 *
 * Returns BOOKE_IRQPRIO_DBELL or BOOKE_IRQPRIO_DBELL_CRIT, or -1 when
 * the message type is one we do not emulate.
 */
static int dbell2prio(ulong param)
{
	switch (param & PPC_DBELL_TYPE_MASK) {
	case PPC_DBELL_TYPE(PPC_DBELL):
		return BOOKE_IRQPRIO_DBELL;
	case PPC_DBELL_TYPE(PPC_DBELL_CRIT):
		return BOOKE_IRQPRIO_DBELL_CRIT;
	default:
		return -1;
	}
}
50*4882a593Smuzhiyun 
kvmppc_e500_emul_msgclr(struct kvm_vcpu * vcpu,int rb)51*4882a593Smuzhiyun static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb)
52*4882a593Smuzhiyun {
53*4882a593Smuzhiyun 	ulong param = vcpu->arch.regs.gpr[rb];
54*4882a593Smuzhiyun 	int prio = dbell2prio(param);
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun 	if (prio < 0)
57*4882a593Smuzhiyun 		return EMULATE_FAIL;
58*4882a593Smuzhiyun 
59*4882a593Smuzhiyun 	clear_bit(prio, &vcpu->arch.pending_exceptions);
60*4882a593Smuzhiyun 	return EMULATE_DONE;
61*4882a593Smuzhiyun }
62*4882a593Smuzhiyun 
kvmppc_e500_emul_msgsnd(struct kvm_vcpu * vcpu,int rb)63*4882a593Smuzhiyun static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
64*4882a593Smuzhiyun {
65*4882a593Smuzhiyun 	ulong param = vcpu->arch.regs.gpr[rb];
66*4882a593Smuzhiyun 	int prio = dbell2prio(rb);
67*4882a593Smuzhiyun 	int pir = param & PPC_DBELL_PIR_MASK;
68*4882a593Smuzhiyun 	int i;
69*4882a593Smuzhiyun 	struct kvm_vcpu *cvcpu;
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun 	if (prio < 0)
72*4882a593Smuzhiyun 		return EMULATE_FAIL;
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun 	kvm_for_each_vcpu(i, cvcpu, vcpu->kvm) {
75*4882a593Smuzhiyun 		int cpir = cvcpu->arch.shared->pir;
76*4882a593Smuzhiyun 		if ((param & PPC_DBELL_MSG_BRDCAST) || (cpir == pir)) {
77*4882a593Smuzhiyun 			set_bit(prio, &cvcpu->arch.pending_exceptions);
78*4882a593Smuzhiyun 			kvm_vcpu_kick(cvcpu);
79*4882a593Smuzhiyun 		}
80*4882a593Smuzhiyun 	}
81*4882a593Smuzhiyun 
82*4882a593Smuzhiyun 	return EMULATE_DONE;
83*4882a593Smuzhiyun }
84*4882a593Smuzhiyun #endif
85*4882a593Smuzhiyun 
kvmppc_e500_emul_ehpriv(struct kvm_vcpu * vcpu,unsigned int inst,int * advance)86*4882a593Smuzhiyun static int kvmppc_e500_emul_ehpriv(struct kvm_vcpu *vcpu,
87*4882a593Smuzhiyun 				   unsigned int inst, int *advance)
88*4882a593Smuzhiyun {
89*4882a593Smuzhiyun 	int emulated = EMULATE_DONE;
90*4882a593Smuzhiyun 
91*4882a593Smuzhiyun 	switch (get_oc(inst)) {
92*4882a593Smuzhiyun 	case EHPRIV_OC_DEBUG:
93*4882a593Smuzhiyun 		vcpu->run->exit_reason = KVM_EXIT_DEBUG;
94*4882a593Smuzhiyun 		vcpu->run->debug.arch.address = vcpu->arch.regs.nip;
95*4882a593Smuzhiyun 		vcpu->run->debug.arch.status = 0;
96*4882a593Smuzhiyun 		kvmppc_account_exit(vcpu, DEBUG_EXITS);
97*4882a593Smuzhiyun 		emulated = EMULATE_EXIT_USER;
98*4882a593Smuzhiyun 		*advance = 0;
99*4882a593Smuzhiyun 		break;
100*4882a593Smuzhiyun 	default:
101*4882a593Smuzhiyun 		emulated = EMULATE_FAIL;
102*4882a593Smuzhiyun 	}
103*4882a593Smuzhiyun 	return emulated;
104*4882a593Smuzhiyun }
105*4882a593Smuzhiyun 
kvmppc_e500_emul_dcbtls(struct kvm_vcpu * vcpu)106*4882a593Smuzhiyun static int kvmppc_e500_emul_dcbtls(struct kvm_vcpu *vcpu)
107*4882a593Smuzhiyun {
108*4882a593Smuzhiyun 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun 	/* Always fail to lock the cache */
111*4882a593Smuzhiyun 	vcpu_e500->l1csr0 |= L1CSR0_CUL;
112*4882a593Smuzhiyun 	return EMULATE_DONE;
113*4882a593Smuzhiyun }
114*4882a593Smuzhiyun 
kvmppc_e500_emul_mftmr(struct kvm_vcpu * vcpu,unsigned int inst,int rt)115*4882a593Smuzhiyun static int kvmppc_e500_emul_mftmr(struct kvm_vcpu *vcpu, unsigned int inst,
116*4882a593Smuzhiyun 				  int rt)
117*4882a593Smuzhiyun {
118*4882a593Smuzhiyun 	/* Expose one thread per vcpu */
119*4882a593Smuzhiyun 	if (get_tmrn(inst) == TMRN_TMCFG0) {
120*4882a593Smuzhiyun 		kvmppc_set_gpr(vcpu, rt,
121*4882a593Smuzhiyun 			       1 | (1 << TMRN_TMCFG0_NATHRD_SHIFT));
122*4882a593Smuzhiyun 		return EMULATE_DONE;
123*4882a593Smuzhiyun 	}
124*4882a593Smuzhiyun 
125*4882a593Smuzhiyun 	return EMULATE_FAIL;
126*4882a593Smuzhiyun }
127*4882a593Smuzhiyun 
/*
 * Top-level e500 instruction emulation dispatch.
 *
 * Decodes a trapped instruction and routes it to the matching e500
 * emulation helper.  Anything not handled here falls through to the
 * generic booke emulator.  *advance tells the caller whether to step
 * the guest PC past the instruction (cleared by the ehpriv debug path).
 *
 * Returns an EMULATE_* code from the selected helper.
 */
int kvmppc_core_emulate_op_e500(struct kvm_vcpu *vcpu,
				unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int ra = get_ra(inst);
	int rb = get_rb(inst);
	int rt = get_rt(inst);
	gva_t ea;

	switch (get_op(inst)) {
	case 31:	/* X-form: dispatch on the extended opcode */
		switch (get_xop(inst)) {

		case XOP_DCBTLS:
			emulated = kvmppc_e500_emul_dcbtls(vcpu);
			break;

#ifdef CONFIG_KVM_E500MC
		case XOP_MSGSND:
			emulated = kvmppc_e500_emul_msgsnd(vcpu, rb);
			break;

		case XOP_MSGCLR:
			emulated = kvmppc_e500_emul_msgclr(vcpu, rb);
			break;
#endif

		case XOP_TLBRE:
			emulated = kvmppc_e500_emul_tlbre(vcpu);
			break;

		case XOP_TLBWE:
			emulated = kvmppc_e500_emul_tlbwe(vcpu);
			break;

		case XOP_TLBSX:
			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbsx(vcpu, ea);
			break;

		case XOP_TLBILX: {
			/* Invalidation type lives in the low 2 bits of the RT field */
			int type = rt & 0x3;
			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbilx(vcpu, type, ea);
			break;
		}

		case XOP_TLBIVAX:
			ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
			emulated = kvmppc_e500_emul_tlbivax(vcpu, ea);
			break;

		case XOP_MFTMR:
			emulated = kvmppc_e500_emul_mftmr(vcpu, inst, rt);
			break;

		case XOP_EHPRIV:
			emulated = kvmppc_e500_emul_ehpriv(vcpu, inst, advance);
			break;

		default:
			emulated = EMULATE_FAIL;
		}

		break;

	default:
		emulated = EMULATE_FAIL;
	}

	/* Not an e500-specific instruction: try the generic booke emulator. */
	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_booke_emulate_op(vcpu, inst, advance);

	return emulated;
}
203*4882a593Smuzhiyun 
/*
 * Emulate mtspr for e500-specific SPRs.
 *
 * Unhandled SPRs fall through to the generic booke mtspr emulator.
 * Returns EMULATE_DONE on success or EMULATE_FAIL for values/SPRs we
 * refuse to emulate.
 */
int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int emulated = EMULATE_DONE;

	switch (sprn) {
#ifndef CONFIG_KVM_BOOKE_HV
	case SPRN_PID:
		kvmppc_set_pid(vcpu, spr_val);
		break;
	/* Only PID0 is really implemented; PID1/PID2 accept only zero. */
	case SPRN_PID1:
		if (spr_val != 0)
			return EMULATE_FAIL;
		vcpu_e500->pid[1] = spr_val;
		break;
	case SPRN_PID2:
		if (spr_val != 0)
			return EMULATE_FAIL;
		vcpu_e500->pid[2] = spr_val;
		break;
	case SPRN_MAS0:
		vcpu->arch.shared->mas0 = spr_val;
		break;
	case SPRN_MAS1:
		vcpu->arch.shared->mas1 = spr_val;
		break;
	case SPRN_MAS2:
		vcpu->arch.shared->mas2 = spr_val;
		break;
	case SPRN_MAS3:
		/* MAS3 is the low 32 bits of the combined mas7_3 field */
		vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
		vcpu->arch.shared->mas7_3 |= spr_val;
		break;
	case SPRN_MAS4:
		vcpu->arch.shared->mas4 = spr_val;
		break;
	case SPRN_MAS6:
		vcpu->arch.shared->mas6 = spr_val;
		break;
	case SPRN_MAS7:
		/* MAS7 is the high 32 bits of the combined mas7_3 field */
		vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
		vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
		break;
#endif
	case SPRN_L1CSR0:
		/* Never let the guest trigger real cache flush/invalidate */
		vcpu_e500->l1csr0 = spr_val;
		vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
		break;
	case SPRN_L1CSR1:
		vcpu_e500->l1csr1 = spr_val;
		vcpu_e500->l1csr1 &= ~(L1CSR1_ICFI | L1CSR1_ICLFR);
		break;
	case SPRN_HID0:
		vcpu_e500->hid0 = spr_val;
		break;
	case SPRN_HID1:
		vcpu_e500->hid1 = spr_val;
		break;

	case SPRN_MMUCSR0:
		emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
				spr_val);
		break;

	case SPRN_PWRMGTCR0:
		/*
		 * Guest relies on host power management configurations
		 * Treat the request as a general store
		 */
		vcpu->arch.pwrmgtcr0 = spr_val;
		break;

	case SPRN_BUCSR:
		/*
		 * If we are here, it means that we have already flushed the
		 * branch predictor, so just return to guest.
		 */
		break;

	/* extra exceptions */
	/*
	 * NOTE(review): IVOR32/33 appear under both CONFIG_SPE_POSSIBLE and
	 * CONFIG_ALTIVEC; this compiles only while the two configs stay
	 * mutually exclusive for these cores — confirm against Kconfig.
	 */
#ifdef CONFIG_SPE_POSSIBLE
	case SPRN_IVOR32:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR33:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val;
		break;
	case SPRN_IVOR34:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case SPRN_IVOR32:
		vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR33:
		vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST] = spr_val;
		break;
#endif
	case SPRN_IVOR35:
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
		break;
#ifdef CONFIG_KVM_BOOKE_HV
	case SPRN_IVOR36:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] = spr_val;
		break;
	case SPRN_IVOR37:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] = spr_val;
		break;
#endif
	default:
		emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
	}

	return emulated;
}
320*4882a593Smuzhiyun 
/*
 * Emulate mfspr for e500-specific SPRs, storing the value in *spr_val.
 *
 * Unhandled SPRs fall through to the generic booke mfspr emulator.
 * Reads of MMU-v2-only SPRs (TLB0PS/TLB1PS/EPTCFG) fail on older cores.
 */
int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int emulated = EMULATE_DONE;

	switch (sprn) {
#ifndef CONFIG_KVM_BOOKE_HV
	case SPRN_PID:
		*spr_val = vcpu_e500->pid[0];
		break;
	case SPRN_PID1:
		*spr_val = vcpu_e500->pid[1];
		break;
	case SPRN_PID2:
		*spr_val = vcpu_e500->pid[2];
		break;
	case SPRN_MAS0:
		*spr_val = vcpu->arch.shared->mas0;
		break;
	case SPRN_MAS1:
		*spr_val = vcpu->arch.shared->mas1;
		break;
	case SPRN_MAS2:
		*spr_val = vcpu->arch.shared->mas2;
		break;
	case SPRN_MAS3:
		/* Low half of the combined mas7_3 field */
		*spr_val = (u32)vcpu->arch.shared->mas7_3;
		break;
	case SPRN_MAS4:
		*spr_val = vcpu->arch.shared->mas4;
		break;
	case SPRN_MAS6:
		*spr_val = vcpu->arch.shared->mas6;
		break;
	case SPRN_MAS7:
		/* High half of the combined mas7_3 field */
		*spr_val = vcpu->arch.shared->mas7_3 >> 32;
		break;
#endif
	case SPRN_DECAR:
		*spr_val = vcpu->arch.decar;
		break;
	case SPRN_TLB0CFG:
		*spr_val = vcpu->arch.tlbcfg[0];
		break;
	case SPRN_TLB1CFG:
		*spr_val = vcpu->arch.tlbcfg[1];
		break;
	case SPRN_TLB0PS:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		*spr_val = vcpu->arch.tlbps[0];
		break;
	case SPRN_TLB1PS:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		*spr_val = vcpu->arch.tlbps[1];
		break;
	case SPRN_L1CSR0:
		*spr_val = vcpu_e500->l1csr0;
		break;
	case SPRN_L1CSR1:
		*spr_val = vcpu_e500->l1csr1;
		break;
	case SPRN_HID0:
		*spr_val = vcpu_e500->hid0;
		break;
	case SPRN_HID1:
		*spr_val = vcpu_e500->hid1;
		break;
	case SPRN_SVR:
		*spr_val = vcpu_e500->svr;
		break;

	case SPRN_MMUCSR0:
		/* No flush ever pending from the guest's point of view */
		*spr_val = 0;
		break;

	case SPRN_MMUCFG:
		*spr_val = vcpu->arch.mmucfg;
		break;
	case SPRN_EPTCFG:
		if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
			return EMULATE_FAIL;
		/*
		 * Legacy Linux guests access EPTCFG register even if the E.PT
		 * category is disabled in the VM. Give them a chance to live.
		 */
		*spr_val = vcpu->arch.eptcfg;
		break;

	case SPRN_PWRMGTCR0:
		*spr_val = vcpu->arch.pwrmgtcr0;
		break;

	/* extra exceptions */
	/*
	 * NOTE(review): IVOR32/33 appear under both CONFIG_SPE_POSSIBLE and
	 * CONFIG_ALTIVEC; this compiles only while the two configs stay
	 * mutually exclusive for these cores — confirm against Kconfig.
	 */
#ifdef CONFIG_SPE_POSSIBLE
	case SPRN_IVOR32:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
		break;
	case SPRN_IVOR33:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
		break;
	case SPRN_IVOR34:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case SPRN_IVOR32:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL];
		break;
	case SPRN_IVOR33:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST];
		break;
#endif
	case SPRN_IVOR35:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
		break;
#ifdef CONFIG_KVM_BOOKE_HV
	case SPRN_IVOR36:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
		break;
	case SPRN_IVOR37:
		*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
		break;
#endif
	default:
		emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
	}

	return emulated;
}
452*4882a593Smuzhiyun 
453