xref: /OK3568_Linux_fs/kernel/arch/mips/kvm/emulate.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * This file is subject to the terms and conditions of the GNU General Public
3*4882a593Smuzhiyun  * License.  See the file "COPYING" in the main directory of this archive
4*4882a593Smuzhiyun  * for more details.
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * KVM/MIPS: Instruction/Exception emulation
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
9*4882a593Smuzhiyun  * Authors: Sanjay Lal <sanjayl@kymasys.com>
10*4882a593Smuzhiyun  */
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include <linux/errno.h>
13*4882a593Smuzhiyun #include <linux/err.h>
14*4882a593Smuzhiyun #include <linux/ktime.h>
15*4882a593Smuzhiyun #include <linux/kvm_host.h>
16*4882a593Smuzhiyun #include <linux/vmalloc.h>
17*4882a593Smuzhiyun #include <linux/fs.h>
18*4882a593Smuzhiyun #include <linux/memblock.h>
19*4882a593Smuzhiyun #include <linux/random.h>
20*4882a593Smuzhiyun #include <asm/page.h>
21*4882a593Smuzhiyun #include <asm/cacheflush.h>
22*4882a593Smuzhiyun #include <asm/cacheops.h>
23*4882a593Smuzhiyun #include <asm/cpu-info.h>
24*4882a593Smuzhiyun #include <asm/mmu_context.h>
25*4882a593Smuzhiyun #include <asm/tlbflush.h>
26*4882a593Smuzhiyun #include <asm/inst.h>
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun #undef CONFIG_MIPS_MT
29*4882a593Smuzhiyun #include <asm/r4kcache.h>
30*4882a593Smuzhiyun #define CONFIG_MIPS_MT
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun #include "interrupt.h"
33*4882a593Smuzhiyun #include "commpage.h"
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun #include "trace.h"
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun /*
38*4882a593Smuzhiyun  * Compute the return address and emulate the branch, if required.
39*4882a593Smuzhiyun  * This function should only be called for an instruction in a branch delay slot.
40*4882a593Smuzhiyun  */
41*4882a593Smuzhiyun static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
42*4882a593Smuzhiyun 				  unsigned long *out)
43*4882a593Smuzhiyun {
44*4882a593Smuzhiyun 	unsigned int dspcontrol;
45*4882a593Smuzhiyun 	union mips_instruction insn;
46*4882a593Smuzhiyun 	struct kvm_vcpu_arch *arch = &vcpu->arch;
47*4882a593Smuzhiyun 	long epc = instpc;
48*4882a593Smuzhiyun 	long nextpc;
49*4882a593Smuzhiyun 	int err;
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun 	if (epc & 3) {
52*4882a593Smuzhiyun 		kvm_err("%s: unaligned epc\n", __func__);
53*4882a593Smuzhiyun 		return -EINVAL;
54*4882a593Smuzhiyun 	}
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun 	/* Read the instruction */
57*4882a593Smuzhiyun 	err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word);
58*4882a593Smuzhiyun 	if (err)
59*4882a593Smuzhiyun 		return err;
60*4882a593Smuzhiyun 
61*4882a593Smuzhiyun 	switch (insn.i_format.opcode) {
62*4882a593Smuzhiyun 		/* jr and jalr are in r_format format. */
63*4882a593Smuzhiyun 	case spec_op:
64*4882a593Smuzhiyun 		switch (insn.r_format.func) {
65*4882a593Smuzhiyun 		case jalr_op:
66*4882a593Smuzhiyun 			arch->gprs[insn.r_format.rd] = epc + 8;
67*4882a593Smuzhiyun 			fallthrough;
68*4882a593Smuzhiyun 		case jr_op:
69*4882a593Smuzhiyun 			nextpc = arch->gprs[insn.r_format.rs];
70*4882a593Smuzhiyun 			break;
71*4882a593Smuzhiyun 		default:
72*4882a593Smuzhiyun 			return -EINVAL;
73*4882a593Smuzhiyun 		}
74*4882a593Smuzhiyun 		break;
75*4882a593Smuzhiyun 
76*4882a593Smuzhiyun 		/*
77*4882a593Smuzhiyun 		 * This group contains:
78*4882a593Smuzhiyun 		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
79*4882a593Smuzhiyun 		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
80*4882a593Smuzhiyun 		 */
81*4882a593Smuzhiyun 	case bcond_op:
82*4882a593Smuzhiyun 		switch (insn.i_format.rt) {
83*4882a593Smuzhiyun 		case bltz_op:
84*4882a593Smuzhiyun 		case bltzl_op:
85*4882a593Smuzhiyun 			if ((long)arch->gprs[insn.i_format.rs] < 0)
86*4882a593Smuzhiyun 				epc = epc + 4 + (insn.i_format.simmediate << 2);
87*4882a593Smuzhiyun 			else
88*4882a593Smuzhiyun 				epc += 8;
89*4882a593Smuzhiyun 			nextpc = epc;
90*4882a593Smuzhiyun 			break;
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun 		case bgez_op:
93*4882a593Smuzhiyun 		case bgezl_op:
94*4882a593Smuzhiyun 			if ((long)arch->gprs[insn.i_format.rs] >= 0)
95*4882a593Smuzhiyun 				epc = epc + 4 + (insn.i_format.simmediate << 2);
96*4882a593Smuzhiyun 			else
97*4882a593Smuzhiyun 				epc += 8;
98*4882a593Smuzhiyun 			nextpc = epc;
99*4882a593Smuzhiyun 			break;
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun 		case bltzal_op:
102*4882a593Smuzhiyun 		case bltzall_op:
103*4882a593Smuzhiyun 			arch->gprs[31] = epc + 8;
104*4882a593Smuzhiyun 			if ((long)arch->gprs[insn.i_format.rs] < 0)
105*4882a593Smuzhiyun 				epc = epc + 4 + (insn.i_format.simmediate << 2);
106*4882a593Smuzhiyun 			else
107*4882a593Smuzhiyun 				epc += 8;
108*4882a593Smuzhiyun 			nextpc = epc;
109*4882a593Smuzhiyun 			break;
110*4882a593Smuzhiyun 
111*4882a593Smuzhiyun 		case bgezal_op:
112*4882a593Smuzhiyun 		case bgezall_op:
113*4882a593Smuzhiyun 			arch->gprs[31] = epc + 8;
114*4882a593Smuzhiyun 			if ((long)arch->gprs[insn.i_format.rs] >= 0)
115*4882a593Smuzhiyun 				epc = epc + 4 + (insn.i_format.simmediate << 2);
116*4882a593Smuzhiyun 			else
117*4882a593Smuzhiyun 				epc += 8;
118*4882a593Smuzhiyun 			nextpc = epc;
119*4882a593Smuzhiyun 			break;
120*4882a593Smuzhiyun 		case bposge32_op:
121*4882a593Smuzhiyun 			if (!cpu_has_dsp) {
122*4882a593Smuzhiyun 				kvm_err("%s: DSP branch but not DSP ASE\n",
123*4882a593Smuzhiyun 					__func__);
124*4882a593Smuzhiyun 				return -EINVAL;
125*4882a593Smuzhiyun 			}
126*4882a593Smuzhiyun 
127*4882a593Smuzhiyun 			dspcontrol = rddsp(0x01);
128*4882a593Smuzhiyun 
129*4882a593Smuzhiyun 			if (dspcontrol >= 32)
130*4882a593Smuzhiyun 				epc = epc + 4 + (insn.i_format.simmediate << 2);
131*4882a593Smuzhiyun 			else
132*4882a593Smuzhiyun 				epc += 8;
133*4882a593Smuzhiyun 			nextpc = epc;
134*4882a593Smuzhiyun 			break;
135*4882a593Smuzhiyun 		default:
136*4882a593Smuzhiyun 			return -EINVAL;
137*4882a593Smuzhiyun 		}
138*4882a593Smuzhiyun 		break;
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun 		/* These are unconditional and in j_format. */
141*4882a593Smuzhiyun 	case jal_op:
142*4882a593Smuzhiyun 		arch->gprs[31] = instpc + 8;
143*4882a593Smuzhiyun 		fallthrough;
144*4882a593Smuzhiyun 	case j_op:
145*4882a593Smuzhiyun 		epc += 4;
146*4882a593Smuzhiyun 		epc >>= 28;
147*4882a593Smuzhiyun 		epc <<= 28;
148*4882a593Smuzhiyun 		epc |= (insn.j_format.target << 2);
149*4882a593Smuzhiyun 		nextpc = epc;
150*4882a593Smuzhiyun 		break;
151*4882a593Smuzhiyun 
152*4882a593Smuzhiyun 		/* These are conditional and in i_format. */
153*4882a593Smuzhiyun 	case beq_op:
154*4882a593Smuzhiyun 	case beql_op:
155*4882a593Smuzhiyun 		if (arch->gprs[insn.i_format.rs] ==
156*4882a593Smuzhiyun 		    arch->gprs[insn.i_format.rt])
157*4882a593Smuzhiyun 			epc = epc + 4 + (insn.i_format.simmediate << 2);
158*4882a593Smuzhiyun 		else
159*4882a593Smuzhiyun 			epc += 8;
160*4882a593Smuzhiyun 		nextpc = epc;
161*4882a593Smuzhiyun 		break;
162*4882a593Smuzhiyun 
163*4882a593Smuzhiyun 	case bne_op:
164*4882a593Smuzhiyun 	case bnel_op:
165*4882a593Smuzhiyun 		if (arch->gprs[insn.i_format.rs] !=
166*4882a593Smuzhiyun 		    arch->gprs[insn.i_format.rt])
167*4882a593Smuzhiyun 			epc = epc + 4 + (insn.i_format.simmediate << 2);
168*4882a593Smuzhiyun 		else
169*4882a593Smuzhiyun 			epc += 8;
170*4882a593Smuzhiyun 		nextpc = epc;
171*4882a593Smuzhiyun 		break;
172*4882a593Smuzhiyun 
173*4882a593Smuzhiyun 	case blez_op:	/* POP06 */
174*4882a593Smuzhiyun #ifndef CONFIG_CPU_MIPSR6
175*4882a593Smuzhiyun 	case blezl_op:	/* removed in R6 */
176*4882a593Smuzhiyun #endif
177*4882a593Smuzhiyun 		if (insn.i_format.rt != 0)
178*4882a593Smuzhiyun 			goto compact_branch;
179*4882a593Smuzhiyun 		if ((long)arch->gprs[insn.i_format.rs] <= 0)
180*4882a593Smuzhiyun 			epc = epc + 4 + (insn.i_format.simmediate << 2);
181*4882a593Smuzhiyun 		else
182*4882a593Smuzhiyun 			epc += 8;
183*4882a593Smuzhiyun 		nextpc = epc;
184*4882a593Smuzhiyun 		break;
185*4882a593Smuzhiyun 
186*4882a593Smuzhiyun 	case bgtz_op:	/* POP07 */
187*4882a593Smuzhiyun #ifndef CONFIG_CPU_MIPSR6
188*4882a593Smuzhiyun 	case bgtzl_op:	/* removed in R6 */
189*4882a593Smuzhiyun #endif
190*4882a593Smuzhiyun 		if (insn.i_format.rt != 0)
191*4882a593Smuzhiyun 			goto compact_branch;
192*4882a593Smuzhiyun 		if ((long)arch->gprs[insn.i_format.rs] > 0)
193*4882a593Smuzhiyun 			epc = epc + 4 + (insn.i_format.simmediate << 2);
194*4882a593Smuzhiyun 		else
195*4882a593Smuzhiyun 			epc += 8;
196*4882a593Smuzhiyun 		nextpc = epc;
197*4882a593Smuzhiyun 		break;
198*4882a593Smuzhiyun 
199*4882a593Smuzhiyun 		/* And now the FPA/cp1 branch instructions. */
200*4882a593Smuzhiyun 	case cop1_op:
201*4882a593Smuzhiyun 		kvm_err("%s: unsupported cop1_op\n", __func__);
202*4882a593Smuzhiyun 		return -EINVAL;
203*4882a593Smuzhiyun 
204*4882a593Smuzhiyun #ifdef CONFIG_CPU_MIPSR6
205*4882a593Smuzhiyun 	/* R6 added the following compact branches with forbidden slots */
206*4882a593Smuzhiyun 	case blezl_op:	/* POP26 */
207*4882a593Smuzhiyun 	case bgtzl_op:	/* POP27 */
208*4882a593Smuzhiyun 		/* only rt == 0 isn't compact branch */
209*4882a593Smuzhiyun 		if (insn.i_format.rt != 0)
210*4882a593Smuzhiyun 			goto compact_branch;
211*4882a593Smuzhiyun 		return -EINVAL;
212*4882a593Smuzhiyun 	case pop10_op:
213*4882a593Smuzhiyun 	case pop30_op:
214*4882a593Smuzhiyun 		/* only rs == rt == 0 is reserved, rest are compact branches */
215*4882a593Smuzhiyun 		if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
216*4882a593Smuzhiyun 			goto compact_branch;
217*4882a593Smuzhiyun 		return -EINVAL;
218*4882a593Smuzhiyun 	case pop66_op:
219*4882a593Smuzhiyun 	case pop76_op:
220*4882a593Smuzhiyun 		/* only rs == 0 isn't compact branch */
221*4882a593Smuzhiyun 		if (insn.i_format.rs != 0)
222*4882a593Smuzhiyun 			goto compact_branch;
223*4882a593Smuzhiyun 		return -EINVAL;
224*4882a593Smuzhiyun compact_branch:
225*4882a593Smuzhiyun 		/*
226*4882a593Smuzhiyun 		 * If we've hit an exception on the forbidden slot, then
227*4882a593Smuzhiyun 		 * the branch must not have been taken.
228*4882a593Smuzhiyun 		 */
229*4882a593Smuzhiyun 		epc += 8;
230*4882a593Smuzhiyun 		nextpc = epc;
231*4882a593Smuzhiyun 		break;
232*4882a593Smuzhiyun #else
233*4882a593Smuzhiyun compact_branch:
234*4882a593Smuzhiyun 		/* Fall through - Compact branches not supported before R6 */
235*4882a593Smuzhiyun #endif
236*4882a593Smuzhiyun 	default:
237*4882a593Smuzhiyun 		return -EINVAL;
238*4882a593Smuzhiyun 	}
239*4882a593Smuzhiyun 
240*4882a593Smuzhiyun 	*out = nextpc;
241*4882a593Smuzhiyun 	return 0;
242*4882a593Smuzhiyun }
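/*
 * Worked example of the branch arithmetic above, assuming a hypothetical
 * taken "beq" whose simmediate field holds 4: the offset is a signed count
 * of words from the delay slot, so
 *
 *	nextpc = epc + 4 + (4 << 2) = epc + 20
 *
 * while a not-taken branch resumes past the delay slot at epc + 8.
 */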
243*4882a593Smuzhiyun 
244*4882a593Smuzhiyun enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
245*4882a593Smuzhiyun {
246*4882a593Smuzhiyun 	int err;
247*4882a593Smuzhiyun 
248*4882a593Smuzhiyun 	if (cause & CAUSEF_BD) {
249*4882a593Smuzhiyun 		err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,
250*4882a593Smuzhiyun 					     &vcpu->arch.pc);
251*4882a593Smuzhiyun 		if (err)
252*4882a593Smuzhiyun 			return EMULATE_FAIL;
253*4882a593Smuzhiyun 	} else {
254*4882a593Smuzhiyun 		vcpu->arch.pc += 4;
255*4882a593Smuzhiyun 	}
256*4882a593Smuzhiyun 
257*4882a593Smuzhiyun 	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
258*4882a593Smuzhiyun 
259*4882a593Smuzhiyun 	return EMULATE_DONE;
260*4882a593Smuzhiyun }
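/*
 * Minimal usage sketch for update_pc(); the surrounding emulation logic is
 * illustrative rather than taken from this file:
 *
 *	er = update_pc(vcpu, cause);
 *	if (er == EMULATE_FAIL)
 *		return er;
 *
 * Afterwards the guest PC points past the emulated instruction, or at the
 * return address computed by kvm_compute_return_epc() when CAUSEF_BD was
 * set in @cause.
 */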
261*4882a593Smuzhiyun 
262*4882a593Smuzhiyun /**
263*4882a593Smuzhiyun  * kvm_get_badinstr() - Get bad instruction encoding.
264*4882a593Smuzhiyun  * @opc:	Guest pointer to faulting instruction.
265*4882a593Smuzhiyun  * @vcpu:	KVM VCPU information.
266*4882a593Smuzhiyun  *
267*4882a593Smuzhiyun  * Gets the instruction encoding of the faulting instruction, using the saved
268*4882a593Smuzhiyun  * BadInstr register value if it exists, otherwise falling back to reading guest
269*4882a593Smuzhiyun  * memory at @opc.
270*4882a593Smuzhiyun  *
271*4882a593Smuzhiyun  * Returns:	0 on success, with the instruction encoding written to @out, else an error code.
272*4882a593Smuzhiyun  */
273*4882a593Smuzhiyun int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
274*4882a593Smuzhiyun {
275*4882a593Smuzhiyun 	if (cpu_has_badinstr) {
276*4882a593Smuzhiyun 		*out = vcpu->arch.host_cp0_badinstr;
277*4882a593Smuzhiyun 		return 0;
278*4882a593Smuzhiyun 	} else {
279*4882a593Smuzhiyun 		return kvm_get_inst(opc, vcpu, out);
280*4882a593Smuzhiyun 	}
281*4882a593Smuzhiyun }
282*4882a593Smuzhiyun 
283*4882a593Smuzhiyun /**
284*4882a593Smuzhiyun  * kvm_get_badinstrp() - Get bad prior instruction encoding.
285*4882a593Smuzhiyun  * @opc:	Guest pointer to prior faulting instruction.
286*4882a593Smuzhiyun  * @vcpu:	KVM VCPU information.
287*4882a593Smuzhiyun  *
288*4882a593Smuzhiyun  * Gets the instruction encoding of the prior faulting instruction (the branch
289*4882a593Smuzhiyun  * containing the delay slot which faulted), using the saved BadInstrP register
290*4882a593Smuzhiyun  * value if it exists, otherwise falling back to reading guest memory at @opc.
291*4882a593Smuzhiyun  *
292*4882a593Smuzhiyun  * Returns:	0 on success, with the instruction encoding written to @out, else an error code.
293*4882a593Smuzhiyun  */
294*4882a593Smuzhiyun int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
295*4882a593Smuzhiyun {
296*4882a593Smuzhiyun 	if (cpu_has_badinstrp) {
297*4882a593Smuzhiyun 		*out = vcpu->arch.host_cp0_badinstrp;
298*4882a593Smuzhiyun 		return 0;
299*4882a593Smuzhiyun 	} else {
300*4882a593Smuzhiyun 		return kvm_get_inst(opc, vcpu, out);
301*4882a593Smuzhiyun 	}
302*4882a593Smuzhiyun }
303*4882a593Smuzhiyun 
304*4882a593Smuzhiyun /**
305*4882a593Smuzhiyun  * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
306*4882a593Smuzhiyun  * @vcpu:	Virtual CPU.
307*4882a593Smuzhiyun  *
308*4882a593Smuzhiyun  * Returns:	1 if the CP0_Count timer is disabled by either the guest
309*4882a593Smuzhiyun  *		CP0_Cause.DC bit or the count_ctl.DC bit.
310*4882a593Smuzhiyun  *		0 otherwise (in which case CP0_Count timer is running).
311*4882a593Smuzhiyun  */
312*4882a593Smuzhiyun int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
313*4882a593Smuzhiyun {
314*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
315*4882a593Smuzhiyun 
316*4882a593Smuzhiyun 	return	(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
317*4882a593Smuzhiyun 		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
318*4882a593Smuzhiyun }
319*4882a593Smuzhiyun 
320*4882a593Smuzhiyun /**
321*4882a593Smuzhiyun  * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
322*4882a593Smuzhiyun  *
323*4882a593Smuzhiyun  * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
324*4882a593Smuzhiyun  *
325*4882a593Smuzhiyun  * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
326*4882a593Smuzhiyun  */
327*4882a593Smuzhiyun static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
328*4882a593Smuzhiyun {
329*4882a593Smuzhiyun 	s64 now_ns, periods;
330*4882a593Smuzhiyun 	u64 delta;
331*4882a593Smuzhiyun 
332*4882a593Smuzhiyun 	now_ns = ktime_to_ns(now);
333*4882a593Smuzhiyun 	delta = now_ns + vcpu->arch.count_dyn_bias;
334*4882a593Smuzhiyun 
335*4882a593Smuzhiyun 	if (delta >= vcpu->arch.count_period) {
336*4882a593Smuzhiyun 		/* If delta is out of safe range the bias needs adjusting */
337*4882a593Smuzhiyun 		periods = div64_s64(now_ns, vcpu->arch.count_period);
338*4882a593Smuzhiyun 		vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
339*4882a593Smuzhiyun 		/* Recalculate delta with new bias */
340*4882a593Smuzhiyun 		delta = now_ns + vcpu->arch.count_dyn_bias;
341*4882a593Smuzhiyun 	}
342*4882a593Smuzhiyun 
343*4882a593Smuzhiyun 	/*
344*4882a593Smuzhiyun 	 * We've ensured that:
345*4882a593Smuzhiyun 	 *   delta < count_period
346*4882a593Smuzhiyun 	 *
347*4882a593Smuzhiyun 	 * Therefore the intermediate delta*count_hz will never overflow since
348*4882a593Smuzhiyun 	 * at the boundary condition:
349*4882a593Smuzhiyun 	 *   delta = count_period
350*4882a593Smuzhiyun 	 *   delta = NSEC_PER_SEC * 2^32 / count_hz
351*4882a593Smuzhiyun 	 *   delta * count_hz = NSEC_PER_SEC * 2^32
352*4882a593Smuzhiyun 	 */
353*4882a593Smuzhiyun 	return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
354*4882a593Smuzhiyun }
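/*
 * A worked example of the scaling above, assuming a hypothetical count_hz
 * of 100 MHz: count_period = NSEC_PER_SEC * 2^32 / count_hz is roughly
 * 42.95 seconds, so delta stays below ~4.3e10 ns and the intermediate
 * delta * count_hz stays below NSEC_PER_SEC * 2^32 (about 4.3e18), well
 * within the range of a 64-bit value.
 */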
355*4882a593Smuzhiyun 
356*4882a593Smuzhiyun /**
357*4882a593Smuzhiyun  * kvm_mips_count_time() - Get effective current time.
358*4882a593Smuzhiyun  * @vcpu:	Virtual CPU.
359*4882a593Smuzhiyun  *
360*4882a593Smuzhiyun  * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
361*4882a593Smuzhiyun  * except when the master disable bit is set in count_ctl, in which case it is
362*4882a593Smuzhiyun  * count_resume, i.e. the time that the count was disabled.
363*4882a593Smuzhiyun  *
364*4882a593Smuzhiyun  * Returns:	Effective monotonic ktime for CP0_Count.
365*4882a593Smuzhiyun  */
366*4882a593Smuzhiyun static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
367*4882a593Smuzhiyun {
368*4882a593Smuzhiyun 	if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
369*4882a593Smuzhiyun 		return vcpu->arch.count_resume;
370*4882a593Smuzhiyun 
371*4882a593Smuzhiyun 	return ktime_get();
372*4882a593Smuzhiyun }
373*4882a593Smuzhiyun 
374*4882a593Smuzhiyun /**
375*4882a593Smuzhiyun  * kvm_mips_read_count_running() - Read the current count value as if running.
376*4882a593Smuzhiyun  * @vcpu:	Virtual CPU.
377*4882a593Smuzhiyun  * @now:	Kernel time to read CP0_Count at.
378*4882a593Smuzhiyun  *
379*4882a593Smuzhiyun  * Returns the current guest CP0_Count register value at time @now, and handles
380*4882a593Smuzhiyun  * the case where the timer interrupt is pending but hasn't been handled yet.
381*4882a593Smuzhiyun  *
382*4882a593Smuzhiyun  * Returns:	The current value of the guest CP0_Count register.
383*4882a593Smuzhiyun  */
384*4882a593Smuzhiyun static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
385*4882a593Smuzhiyun {
386*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
387*4882a593Smuzhiyun 	ktime_t expires, threshold;
388*4882a593Smuzhiyun 	u32 count, compare;
389*4882a593Smuzhiyun 	int running;
390*4882a593Smuzhiyun 
391*4882a593Smuzhiyun 	/* Calculate the biased and scaled guest CP0_Count */
392*4882a593Smuzhiyun 	count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
393*4882a593Smuzhiyun 	compare = kvm_read_c0_guest_compare(cop0);
394*4882a593Smuzhiyun 
395*4882a593Smuzhiyun 	/*
396*4882a593Smuzhiyun 	 * Find whether CP0_Count has reached the closest timer interrupt. If
397*4882a593Smuzhiyun 	 * not, we shouldn't inject it.
398*4882a593Smuzhiyun 	 */
399*4882a593Smuzhiyun 	if ((s32)(count - compare) < 0)
400*4882a593Smuzhiyun 		return count;
401*4882a593Smuzhiyun 
402*4882a593Smuzhiyun 	/*
403*4882a593Smuzhiyun 	 * The CP0_Count we're going to return has already reached the closest
404*4882a593Smuzhiyun 	 * timer interrupt. Quickly check if it really is a new interrupt by
405*4882a593Smuzhiyun 	 * looking at whether the interval until the hrtimer expiry time is
406*4882a593Smuzhiyun 	 * less than 1/4 of the timer period.
407*4882a593Smuzhiyun 	 */
408*4882a593Smuzhiyun 	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
409*4882a593Smuzhiyun 	threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
410*4882a593Smuzhiyun 	if (ktime_before(expires, threshold)) {
411*4882a593Smuzhiyun 		/*
412*4882a593Smuzhiyun 		 * Cancel it while we handle it so there's no chance of
413*4882a593Smuzhiyun 		 * interference with the timeout handler.
414*4882a593Smuzhiyun 		 */
415*4882a593Smuzhiyun 		running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
416*4882a593Smuzhiyun 
417*4882a593Smuzhiyun 		/* Nothing should be waiting on the timeout */
418*4882a593Smuzhiyun 		kvm_mips_callbacks->queue_timer_int(vcpu);
419*4882a593Smuzhiyun 
420*4882a593Smuzhiyun 		/*
421*4882a593Smuzhiyun 		 * Restart the timer if it was running based on the expiry time
422*4882a593Smuzhiyun 		 * we read, so that we don't push it back 2 periods.
423*4882a593Smuzhiyun 		 */
424*4882a593Smuzhiyun 		if (running) {
425*4882a593Smuzhiyun 			expires = ktime_add_ns(expires,
426*4882a593Smuzhiyun 					       vcpu->arch.count_period);
427*4882a593Smuzhiyun 			hrtimer_start(&vcpu->arch.comparecount_timer, expires,
428*4882a593Smuzhiyun 				      HRTIMER_MODE_ABS);
429*4882a593Smuzhiyun 		}
430*4882a593Smuzhiyun 	}
431*4882a593Smuzhiyun 
432*4882a593Smuzhiyun 	return count;
433*4882a593Smuzhiyun }
434*4882a593Smuzhiyun 
435*4882a593Smuzhiyun /**
436*4882a593Smuzhiyun  * kvm_mips_read_count() - Read the current count value.
437*4882a593Smuzhiyun  * @vcpu:	Virtual CPU.
438*4882a593Smuzhiyun  *
439*4882a593Smuzhiyun  * Read the current guest CP0_Count value, taking into account whether the timer
440*4882a593Smuzhiyun  * is stopped.
441*4882a593Smuzhiyun  *
442*4882a593Smuzhiyun  * Returns:	The current guest CP0_Count value.
443*4882a593Smuzhiyun  */
444*4882a593Smuzhiyun u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
445*4882a593Smuzhiyun {
446*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
447*4882a593Smuzhiyun 
448*4882a593Smuzhiyun 	/* If count disabled just read static copy of count */
449*4882a593Smuzhiyun 	if (kvm_mips_count_disabled(vcpu))
450*4882a593Smuzhiyun 		return kvm_read_c0_guest_count(cop0);
451*4882a593Smuzhiyun 
452*4882a593Smuzhiyun 	return kvm_mips_read_count_running(vcpu, ktime_get());
453*4882a593Smuzhiyun }
454*4882a593Smuzhiyun 
455*4882a593Smuzhiyun /**
456*4882a593Smuzhiyun  * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
457*4882a593Smuzhiyun  * @vcpu:	Virtual CPU.
458*4882a593Smuzhiyun  * @count:	Output pointer for CP0_Count value at point of freeze.
459*4882a593Smuzhiyun  *
460*4882a593Smuzhiyun  * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
461*4882a593Smuzhiyun  * at the point it was frozen. It is guaranteed that any pending interrupts at
462*4882a593Smuzhiyun  * the point it was frozen are handled, and none after that point.
463*4882a593Smuzhiyun  *
464*4882a593Smuzhiyun  * This is useful where the time/CP0_Count is needed in the calculation of the
465*4882a593Smuzhiyun  * new parameters.
466*4882a593Smuzhiyun  *
467*4882a593Smuzhiyun  * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
468*4882a593Smuzhiyun  *
469*4882a593Smuzhiyun  * Returns:	The ktime at the point of freeze.
470*4882a593Smuzhiyun  */
471*4882a593Smuzhiyun ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
472*4882a593Smuzhiyun {
473*4882a593Smuzhiyun 	ktime_t now;
474*4882a593Smuzhiyun 
475*4882a593Smuzhiyun 	/* stop hrtimer before finding time */
476*4882a593Smuzhiyun 	hrtimer_cancel(&vcpu->arch.comparecount_timer);
477*4882a593Smuzhiyun 	now = ktime_get();
478*4882a593Smuzhiyun 
479*4882a593Smuzhiyun 	/* find count at this point and handle pending hrtimer */
480*4882a593Smuzhiyun 	*count = kvm_mips_read_count_running(vcpu, now);
481*4882a593Smuzhiyun 
482*4882a593Smuzhiyun 	return now;
483*4882a593Smuzhiyun }
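/*
 * A minimal sketch of the freeze/modify/resume pattern this enables
 * (kvm_mips_set_count_hz() below follows the same shape):
 *
 *	now = kvm_mips_freeze_hrtimer(vcpu, &count);
 *	... update timer parameters such as count_hz or count_bias ...
 *	kvm_mips_resume_hrtimer(vcpu, now, count);
 *
 * so the Count value observed by the guest stays continuous across the
 * parameter change.
 */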
484*4882a593Smuzhiyun 
485*4882a593Smuzhiyun /**
486*4882a593Smuzhiyun  * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
487*4882a593Smuzhiyun  * @vcpu:	Virtual CPU.
488*4882a593Smuzhiyun  * @now:	ktime at point of resume.
489*4882a593Smuzhiyun  * @count:	CP0_Count at point of resume.
490*4882a593Smuzhiyun  *
491*4882a593Smuzhiyun  * Resumes the timer and updates the timer expiry based on @now and @count.
492*4882a593Smuzhiyun  * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
493*4882a593Smuzhiyun  * parameters need to be changed.
494*4882a593Smuzhiyun  *
495*4882a593Smuzhiyun  * It is guaranteed that a timer interrupt immediately after resume will be
496*4882a593Smuzhiyun  * handled, but not if CP0_Compare is exactly at @count. That case is already
497*4882a593Smuzhiyun  * handled by kvm_mips_freeze_hrtimer().
498*4882a593Smuzhiyun  *
499*4882a593Smuzhiyun  * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
500*4882a593Smuzhiyun  */
501*4882a593Smuzhiyun static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
502*4882a593Smuzhiyun 				    ktime_t now, u32 count)
503*4882a593Smuzhiyun {
504*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
505*4882a593Smuzhiyun 	u32 compare;
506*4882a593Smuzhiyun 	u64 delta;
507*4882a593Smuzhiyun 	ktime_t expire;
508*4882a593Smuzhiyun 
509*4882a593Smuzhiyun 	/* Calculate timeout (wrap 0 to 2^32) */
510*4882a593Smuzhiyun 	compare = kvm_read_c0_guest_compare(cop0);
511*4882a593Smuzhiyun 	delta = (u64)(u32)(compare - count - 1) + 1;
512*4882a593Smuzhiyun 	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
513*4882a593Smuzhiyun 	expire = ktime_add_ns(now, delta);
514*4882a593Smuzhiyun 
515*4882a593Smuzhiyun 	/* Update hrtimer to use new timeout */
516*4882a593Smuzhiyun 	hrtimer_cancel(&vcpu->arch.comparecount_timer);
517*4882a593Smuzhiyun 	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
518*4882a593Smuzhiyun }
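/*
 * The "wrap 0 to 2^32" delta above covers the case where CP0_Compare has
 * already wrapped past CP0_Count. For example, with compare == 0x10 and
 * count == 0xfffffff0:
 *
 *	delta = (u64)(u32)(0x10 - 0xfffffff0 - 1) + 1 = 0x20
 *
 * i.e. 32 timer ticks until the next interrupt, and compare == count
 * yields a full 2^32 tick period rather than zero.
 */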
519*4882a593Smuzhiyun 
520*4882a593Smuzhiyun /**
521*4882a593Smuzhiyun  * kvm_mips_restore_hrtimer() - Restore hrtimer after a gap, updating expiry.
522*4882a593Smuzhiyun  * @vcpu:	Virtual CPU.
523*4882a593Smuzhiyun  * @before:	Time before Count was saved, lower bound of drift calculation.
524*4882a593Smuzhiyun  * @count:	CP0_Count at point of restore.
525*4882a593Smuzhiyun  * @min_drift:	Minimum amount of drift permitted before correction.
526*4882a593Smuzhiyun  *		Must be <= 0.
527*4882a593Smuzhiyun  *
528*4882a593Smuzhiyun  * Restores the timer from a particular @count, accounting for drift. This can
529*4882a593Smuzhiyun  * be used in conjunction with kvm_mips_freeze_hrtimer() when a hardware timer is
530*4882a593Smuzhiyun  * to be used for a period of time, but the exact ktime corresponding to the
531*4882a593Smuzhiyun  * final Count that must be restored is not known.
532*4882a593Smuzhiyun  *
533*4882a593Smuzhiyun  * It is guaranteed that a timer interrupt immediately after restore will be
534*4882a593Smuzhiyun  * handled, but not if CP0_Compare is exactly at @count. That case should
535*4882a593Smuzhiyun  * already be handled when the hardware timer state is saved.
536*4882a593Smuzhiyun  *
537*4882a593Smuzhiyun  * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is not
538*4882a593Smuzhiyun  * stopped).
539*4882a593Smuzhiyun  *
540*4882a593Smuzhiyun  * Returns:	Amount of correction to count_bias due to drift.
541*4882a593Smuzhiyun  */
542*4882a593Smuzhiyun int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
543*4882a593Smuzhiyun 			     u32 count, int min_drift)
544*4882a593Smuzhiyun {
545*4882a593Smuzhiyun 	ktime_t now, count_time;
546*4882a593Smuzhiyun 	u32 now_count, before_count;
547*4882a593Smuzhiyun 	u64 delta;
548*4882a593Smuzhiyun 	int drift, ret = 0;
549*4882a593Smuzhiyun 
550*4882a593Smuzhiyun 	/* Calculate expected count at before */
551*4882a593Smuzhiyun 	before_count = vcpu->arch.count_bias +
552*4882a593Smuzhiyun 			kvm_mips_ktime_to_count(vcpu, before);
553*4882a593Smuzhiyun 
554*4882a593Smuzhiyun 	/*
555*4882a593Smuzhiyun 	 * Detect significantly negative drift, where count is lower than
556*4882a593Smuzhiyun 	 * expected. Some negative drift is expected when hardware counter is
557*4882a593Smuzhiyun 	 * set after kvm_mips_freeze_timer(), and it is harmless to allow the
558*4882a593Smuzhiyun 	 * time to jump forwards a little, within reason. If the drift is too
559*4882a593Smuzhiyun 	 * significant, adjust the bias to avoid a big Guest.CP0_Count jump.
560*4882a593Smuzhiyun 	 */
561*4882a593Smuzhiyun 	drift = count - before_count;
562*4882a593Smuzhiyun 	if (drift < min_drift) {
563*4882a593Smuzhiyun 		count_time = before;
564*4882a593Smuzhiyun 		vcpu->arch.count_bias += drift;
565*4882a593Smuzhiyun 		ret = drift;
566*4882a593Smuzhiyun 		goto resume;
567*4882a593Smuzhiyun 	}
568*4882a593Smuzhiyun 
569*4882a593Smuzhiyun 	/* Calculate expected count right now */
570*4882a593Smuzhiyun 	now = ktime_get();
571*4882a593Smuzhiyun 	now_count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
572*4882a593Smuzhiyun 
573*4882a593Smuzhiyun 	/*
574*4882a593Smuzhiyun 	 * Detect positive drift, where count is higher than expected, and
575*4882a593Smuzhiyun 	 * adjust the bias to avoid guest time going backwards.
576*4882a593Smuzhiyun 	 */
577*4882a593Smuzhiyun 	drift = count - now_count;
578*4882a593Smuzhiyun 	if (drift > 0) {
579*4882a593Smuzhiyun 		count_time = now;
580*4882a593Smuzhiyun 		vcpu->arch.count_bias += drift;
581*4882a593Smuzhiyun 		ret = drift;
582*4882a593Smuzhiyun 		goto resume;
583*4882a593Smuzhiyun 	}
584*4882a593Smuzhiyun 
585*4882a593Smuzhiyun 	/* Subtract nanosecond delta to find ktime when count was read */
586*4882a593Smuzhiyun 	delta = (u64)(u32)(now_count - count);
587*4882a593Smuzhiyun 	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
588*4882a593Smuzhiyun 	count_time = ktime_sub_ns(now, delta);
589*4882a593Smuzhiyun 
590*4882a593Smuzhiyun resume:
591*4882a593Smuzhiyun 	/* Resume using the calculated ktime */
592*4882a593Smuzhiyun 	kvm_mips_resume_hrtimer(vcpu, count_time, count);
593*4882a593Smuzhiyun 	return ret;
594*4882a593Smuzhiyun }
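/*
 * For instance, assuming a hypothetical min_drift of -16: a restored count
 * 5 ticks below the expected value is tolerated (guest time merely jumps
 * forward slightly), while a count 100 ticks low, or any count above the
 * expected value, shifts count_bias by the drift so that guest CP0_Count
 * neither jumps far forward nor runs backwards.
 */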
595*4882a593Smuzhiyun 
596*4882a593Smuzhiyun /**
597*4882a593Smuzhiyun  * kvm_mips_write_count() - Modify the count and update timer.
598*4882a593Smuzhiyun  * @vcpu:	Virtual CPU.
599*4882a593Smuzhiyun  * @count:	Guest CP0_Count value to set.
600*4882a593Smuzhiyun  *
601*4882a593Smuzhiyun  * Sets the CP0_Count value and updates the timer accordingly.
602*4882a593Smuzhiyun  */
603*4882a593Smuzhiyun void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
604*4882a593Smuzhiyun {
605*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
606*4882a593Smuzhiyun 	ktime_t now;
607*4882a593Smuzhiyun 
608*4882a593Smuzhiyun 	/* Calculate bias */
609*4882a593Smuzhiyun 	now = kvm_mips_count_time(vcpu);
610*4882a593Smuzhiyun 	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
611*4882a593Smuzhiyun 
612*4882a593Smuzhiyun 	if (kvm_mips_count_disabled(vcpu))
613*4882a593Smuzhiyun 		/* The timer's disabled, adjust the static count */
614*4882a593Smuzhiyun 		kvm_write_c0_guest_count(cop0, count);
615*4882a593Smuzhiyun 	else
616*4882a593Smuzhiyun 		/* Update timeout */
617*4882a593Smuzhiyun 		kvm_mips_resume_hrtimer(vcpu, now, count);
618*4882a593Smuzhiyun }
619*4882a593Smuzhiyun 
620*4882a593Smuzhiyun /**
621*4882a593Smuzhiyun  * kvm_mips_init_count() - Initialise timer.
622*4882a593Smuzhiyun  * @vcpu:	Virtual CPU.
623*4882a593Smuzhiyun  * @count_hz:	Frequency of timer.
624*4882a593Smuzhiyun  *
625*4882a593Smuzhiyun  * Initialise the timer to the specified frequency, zero it, and set it going if
626*4882a593Smuzhiyun  * it's enabled.
627*4882a593Smuzhiyun  */
628*4882a593Smuzhiyun void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz)
629*4882a593Smuzhiyun {
630*4882a593Smuzhiyun 	vcpu->arch.count_hz = count_hz;
631*4882a593Smuzhiyun 	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
632*4882a593Smuzhiyun 	vcpu->arch.count_dyn_bias = 0;
633*4882a593Smuzhiyun 
634*4882a593Smuzhiyun 	/* Starting at 0 */
635*4882a593Smuzhiyun 	kvm_mips_write_count(vcpu, 0);
636*4882a593Smuzhiyun }
637*4882a593Smuzhiyun 
638*4882a593Smuzhiyun /**
639*4882a593Smuzhiyun  * kvm_mips_set_count_hz() - Update the frequency of the timer.
640*4882a593Smuzhiyun  * @vcpu:	Virtual CPU.
641*4882a593Smuzhiyun  * @count_hz:	Frequency of CP0_Count timer in Hz.
642*4882a593Smuzhiyun  *
643*4882a593Smuzhiyun  * Change the frequency of the CP0_Count timer. This is done atomically so that
644*4882a593Smuzhiyun  * CP0_Count is continuous and no timer interrupt is lost.
645*4882a593Smuzhiyun  *
646*4882a593Smuzhiyun  * Returns:	-EINVAL if @count_hz is out of range.
647*4882a593Smuzhiyun  *		0 on success.
648*4882a593Smuzhiyun  */
649*4882a593Smuzhiyun int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
650*4882a593Smuzhiyun {
651*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
652*4882a593Smuzhiyun 	int dc;
653*4882a593Smuzhiyun 	ktime_t now;
654*4882a593Smuzhiyun 	u32 count;
655*4882a593Smuzhiyun 
656*4882a593Smuzhiyun 	/* ensure the frequency is in a sensible range... */
657*4882a593Smuzhiyun 	if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
658*4882a593Smuzhiyun 		return -EINVAL;
659*4882a593Smuzhiyun 	/* ... and has actually changed */
660*4882a593Smuzhiyun 	if (vcpu->arch.count_hz == count_hz)
661*4882a593Smuzhiyun 		return 0;
662*4882a593Smuzhiyun 
663*4882a593Smuzhiyun 	/* Safely freeze timer so we can keep it continuous */
664*4882a593Smuzhiyun 	dc = kvm_mips_count_disabled(vcpu);
665*4882a593Smuzhiyun 	if (dc) {
666*4882a593Smuzhiyun 		now = kvm_mips_count_time(vcpu);
667*4882a593Smuzhiyun 		count = kvm_read_c0_guest_count(cop0);
668*4882a593Smuzhiyun 	} else {
669*4882a593Smuzhiyun 		now = kvm_mips_freeze_hrtimer(vcpu, &count);
670*4882a593Smuzhiyun 	}
671*4882a593Smuzhiyun 
672*4882a593Smuzhiyun 	/* Update the frequency */
673*4882a593Smuzhiyun 	vcpu->arch.count_hz = count_hz;
674*4882a593Smuzhiyun 	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
675*4882a593Smuzhiyun 	vcpu->arch.count_dyn_bias = 0;
676*4882a593Smuzhiyun 
677*4882a593Smuzhiyun 	/* Calculate adjusted bias so dynamic count is unchanged */
678*4882a593Smuzhiyun 	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
679*4882a593Smuzhiyun 
680*4882a593Smuzhiyun 	/* Update and resume hrtimer */
681*4882a593Smuzhiyun 	if (!dc)
682*4882a593Smuzhiyun 		kvm_mips_resume_hrtimer(vcpu, now, count);
683*4882a593Smuzhiyun 	return 0;
684*4882a593Smuzhiyun }
685*4882a593Smuzhiyun 
686*4882a593Smuzhiyun /**
687*4882a593Smuzhiyun  * kvm_mips_write_compare() - Modify compare and update timer.
688*4882a593Smuzhiyun  * @vcpu:	Virtual CPU.
689*4882a593Smuzhiyun  * @compare:	New CP0_Compare value.
690*4882a593Smuzhiyun  * @ack:	Whether to acknowledge timer interrupt.
691*4882a593Smuzhiyun  *
692*4882a593Smuzhiyun  * Update CP0_Compare to a new value and update the timeout.
693*4882a593Smuzhiyun  * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
694*4882a593Smuzhiyun  * any pending timer interrupt is preserved.
695*4882a593Smuzhiyun  */
696*4882a593Smuzhiyun void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
697*4882a593Smuzhiyun {
698*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
699*4882a593Smuzhiyun 	int dc;
700*4882a593Smuzhiyun 	u32 old_compare = kvm_read_c0_guest_compare(cop0);
701*4882a593Smuzhiyun 	s32 delta = compare - old_compare;
702*4882a593Smuzhiyun 	u32 cause;
703*4882a593Smuzhiyun 	ktime_t now = ktime_set(0, 0); /* silence bogus GCC warning */
704*4882a593Smuzhiyun 	u32 count;
705*4882a593Smuzhiyun 
706*4882a593Smuzhiyun 	/* if unchanged, must just be an ack */
707*4882a593Smuzhiyun 	if (old_compare == compare) {
708*4882a593Smuzhiyun 		if (!ack)
709*4882a593Smuzhiyun 			return;
710*4882a593Smuzhiyun 		kvm_mips_callbacks->dequeue_timer_int(vcpu);
711*4882a593Smuzhiyun 		kvm_write_c0_guest_compare(cop0, compare);
712*4882a593Smuzhiyun 		return;
713*4882a593Smuzhiyun 	}
714*4882a593Smuzhiyun 
715*4882a593Smuzhiyun 	/*
716*4882a593Smuzhiyun 	 * If guest CP0_Compare moves forward, CP0_GTOffset should be adjusted
717*4882a593Smuzhiyun 	 * too to prevent guest CP0_Count hitting guest CP0_Compare.
718*4882a593Smuzhiyun 	 *
719*4882a593Smuzhiyun 	 * The new GTOffset corresponds to the new value of CP0_Compare, and is
720*4882a593Smuzhiyun 	 * set prior to it being written into the guest context. We disable
721*4882a593Smuzhiyun 	 * preemption until the new value is written to prevent restore of a
722*4882a593Smuzhiyun 	 * GTOffset corresponding to the old CP0_Compare value.
723*4882a593Smuzhiyun 	 */
724*4882a593Smuzhiyun 	if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta > 0) {
725*4882a593Smuzhiyun 		preempt_disable();
726*4882a593Smuzhiyun 		write_c0_gtoffset(compare - read_c0_count());
727*4882a593Smuzhiyun 		back_to_back_c0_hazard();
728*4882a593Smuzhiyun 	}
729*4882a593Smuzhiyun 
730*4882a593Smuzhiyun 	/* freeze_hrtimer() takes care of timer interrupts <= count */
731*4882a593Smuzhiyun 	dc = kvm_mips_count_disabled(vcpu);
732*4882a593Smuzhiyun 	if (!dc)
733*4882a593Smuzhiyun 		now = kvm_mips_freeze_hrtimer(vcpu, &count);
734*4882a593Smuzhiyun 
735*4882a593Smuzhiyun 	if (ack)
736*4882a593Smuzhiyun 		kvm_mips_callbacks->dequeue_timer_int(vcpu);
737*4882a593Smuzhiyun 	else if (IS_ENABLED(CONFIG_KVM_MIPS_VZ))
738*4882a593Smuzhiyun 		/*
739*4882a593Smuzhiyun 		 * With VZ, writing CP0_Compare acks (clears) CP0_Cause.TI, so
740*4882a593Smuzhiyun 		 * preserve guest CP0_Cause.TI if we don't want to ack it.
741*4882a593Smuzhiyun 		 */
742*4882a593Smuzhiyun 		cause = kvm_read_c0_guest_cause(cop0);
743*4882a593Smuzhiyun 
744*4882a593Smuzhiyun 	kvm_write_c0_guest_compare(cop0, compare);
745*4882a593Smuzhiyun 
746*4882a593Smuzhiyun 	if (IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
747*4882a593Smuzhiyun 		if (delta > 0)
748*4882a593Smuzhiyun 			preempt_enable();
749*4882a593Smuzhiyun 
750*4882a593Smuzhiyun 		back_to_back_c0_hazard();
751*4882a593Smuzhiyun 
752*4882a593Smuzhiyun 		if (!ack && cause & CAUSEF_TI)
753*4882a593Smuzhiyun 			kvm_write_c0_guest_cause(cop0, cause);
754*4882a593Smuzhiyun 	}
755*4882a593Smuzhiyun 
756*4882a593Smuzhiyun 	/* resume_hrtimer() takes care of timer interrupts > count */
757*4882a593Smuzhiyun 	if (!dc)
758*4882a593Smuzhiyun 		kvm_mips_resume_hrtimer(vcpu, now, count);
759*4882a593Smuzhiyun 
760*4882a593Smuzhiyun 	/*
761*4882a593Smuzhiyun 	 * If guest CP0_Compare is moving backward, we delay CP0_GTOffset change
762*4882a593Smuzhiyun 	 * until after the new CP0_Compare is written, otherwise new guest
763*4882a593Smuzhiyun 	 * CP0_Count could hit new guest CP0_Compare.
764*4882a593Smuzhiyun 	 */
765*4882a593Smuzhiyun 	if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta <= 0)
766*4882a593Smuzhiyun 		write_c0_gtoffset(compare - read_c0_count());
767*4882a593Smuzhiyun }
768*4882a593Smuzhiyun 
769*4882a593Smuzhiyun /**
770*4882a593Smuzhiyun  * kvm_mips_count_disable() - Disable count.
771*4882a593Smuzhiyun  * @vcpu:	Virtual CPU.
772*4882a593Smuzhiyun  *
773*4882a593Smuzhiyun  * Disable the CP0_Count timer. A timer interrupt on or before the final stop
774*4882a593Smuzhiyun  * time will be handled but not after.
775*4882a593Smuzhiyun  *
776*4882a593Smuzhiyun  * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
777*4882a593Smuzhiyun  * count_ctl.DC has been set (count disabled).
778*4882a593Smuzhiyun  *
779*4882a593Smuzhiyun  * Returns:	The time that the timer was stopped.
780*4882a593Smuzhiyun  */
781*4882a593Smuzhiyun static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
782*4882a593Smuzhiyun {
783*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
784*4882a593Smuzhiyun 	u32 count;
785*4882a593Smuzhiyun 	ktime_t now;
786*4882a593Smuzhiyun 
787*4882a593Smuzhiyun 	/* Stop hrtimer */
788*4882a593Smuzhiyun 	hrtimer_cancel(&vcpu->arch.comparecount_timer);
789*4882a593Smuzhiyun 
790*4882a593Smuzhiyun 	/* Set the static count from the dynamic count, handling pending TI */
791*4882a593Smuzhiyun 	now = ktime_get();
792*4882a593Smuzhiyun 	count = kvm_mips_read_count_running(vcpu, now);
793*4882a593Smuzhiyun 	kvm_write_c0_guest_count(cop0, count);
794*4882a593Smuzhiyun 
795*4882a593Smuzhiyun 	return now;
796*4882a593Smuzhiyun }
797*4882a593Smuzhiyun 
798*4882a593Smuzhiyun /**
799*4882a593Smuzhiyun  * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
800*4882a593Smuzhiyun  * @vcpu:	Virtual CPU.
801*4882a593Smuzhiyun  *
802*4882a593Smuzhiyun  * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
803*4882a593Smuzhiyun  * before the final stop time will be handled if the timer isn't disabled by
804*4882a593Smuzhiyun  * count_ctl.DC, but not after.
805*4882a593Smuzhiyun  *
806*4882a593Smuzhiyun  * Assumes CP0_Cause.DC is clear (count enabled).
807*4882a593Smuzhiyun  */
808*4882a593Smuzhiyun void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
809*4882a593Smuzhiyun {
810*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
811*4882a593Smuzhiyun 
812*4882a593Smuzhiyun 	kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
813*4882a593Smuzhiyun 	if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
814*4882a593Smuzhiyun 		kvm_mips_count_disable(vcpu);
815*4882a593Smuzhiyun }
816*4882a593Smuzhiyun 
817*4882a593Smuzhiyun /**
818*4882a593Smuzhiyun  * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
819*4882a593Smuzhiyun  * @vcpu:	Virtual CPU.
820*4882a593Smuzhiyun  *
821*4882a593Smuzhiyun  * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
822*4882a593Smuzhiyun  * the start time will be handled if the timer isn't disabled by count_ctl.DC,
823*4882a593Smuzhiyun  * potentially before even returning, so the caller should be careful with
824*4882a593Smuzhiyun  * ordering of CP0_Cause modifications so as not to lose it.
825*4882a593Smuzhiyun  *
826*4882a593Smuzhiyun  * Assumes CP0_Cause.DC is set (count disabled).
827*4882a593Smuzhiyun  */
828*4882a593Smuzhiyun void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
829*4882a593Smuzhiyun {
830*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
831*4882a593Smuzhiyun 	u32 count;
832*4882a593Smuzhiyun 
833*4882a593Smuzhiyun 	kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
834*4882a593Smuzhiyun 
835*4882a593Smuzhiyun 	/*
836*4882a593Smuzhiyun 	 * Set the dynamic count to match the static count.
837*4882a593Smuzhiyun 	 * This starts the hrtimer if count_ctl.DC allows it.
838*4882a593Smuzhiyun 	 * Otherwise it conveniently updates the biases.
839*4882a593Smuzhiyun 	 */
840*4882a593Smuzhiyun 	count = kvm_read_c0_guest_count(cop0);
841*4882a593Smuzhiyun 	kvm_mips_write_count(vcpu, count);
842*4882a593Smuzhiyun }
843*4882a593Smuzhiyun 
844*4882a593Smuzhiyun /**
845*4882a593Smuzhiyun  * kvm_mips_set_count_ctl() - Update the count control KVM register.
846*4882a593Smuzhiyun  * @vcpu:	Virtual CPU.
847*4882a593Smuzhiyun  * @count_ctl:	Count control register new value.
848*4882a593Smuzhiyun  *
849*4882a593Smuzhiyun  * Set the count control KVM register. The timer is updated accordingly.
850*4882a593Smuzhiyun  *
851*4882a593Smuzhiyun  * Returns:	-EINVAL if reserved bits are set.
852*4882a593Smuzhiyun  *		0 on success.
853*4882a593Smuzhiyun  */
854*4882a593Smuzhiyun int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
855*4882a593Smuzhiyun {
856*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
857*4882a593Smuzhiyun 	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
858*4882a593Smuzhiyun 	s64 delta;
859*4882a593Smuzhiyun 	ktime_t expire, now;
860*4882a593Smuzhiyun 	u32 count, compare;
861*4882a593Smuzhiyun 
862*4882a593Smuzhiyun 	/* Only allow defined bits to be changed */
863*4882a593Smuzhiyun 	if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
864*4882a593Smuzhiyun 		return -EINVAL;
865*4882a593Smuzhiyun 
866*4882a593Smuzhiyun 	/* Apply new value */
867*4882a593Smuzhiyun 	vcpu->arch.count_ctl = count_ctl;
868*4882a593Smuzhiyun 
869*4882a593Smuzhiyun 	/* Master CP0_Count disable */
870*4882a593Smuzhiyun 	if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
871*4882a593Smuzhiyun 		/* Is CP0_Cause.DC already disabling CP0_Count? */
872*4882a593Smuzhiyun 		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
873*4882a593Smuzhiyun 			if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
874*4882a593Smuzhiyun 				/* Just record the current time */
875*4882a593Smuzhiyun 				vcpu->arch.count_resume = ktime_get();
876*4882a593Smuzhiyun 		} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
877*4882a593Smuzhiyun 			/* disable timer and record current time */
878*4882a593Smuzhiyun 			vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
879*4882a593Smuzhiyun 		} else {
880*4882a593Smuzhiyun 			/*
881*4882a593Smuzhiyun 			 * Calculate timeout relative to static count at resume
882*4882a593Smuzhiyun 			 * time (wrap 0 to 2^32).
883*4882a593Smuzhiyun 			 */
884*4882a593Smuzhiyun 			count = kvm_read_c0_guest_count(cop0);
885*4882a593Smuzhiyun 			compare = kvm_read_c0_guest_compare(cop0);
886*4882a593Smuzhiyun 			delta = (u64)(u32)(compare - count - 1) + 1;
887*4882a593Smuzhiyun 			delta = div_u64(delta * NSEC_PER_SEC,
888*4882a593Smuzhiyun 					vcpu->arch.count_hz);
889*4882a593Smuzhiyun 			expire = ktime_add_ns(vcpu->arch.count_resume, delta);
890*4882a593Smuzhiyun 
891*4882a593Smuzhiyun 			/* Handle pending interrupt */
892*4882a593Smuzhiyun 			now = ktime_get();
893*4882a593Smuzhiyun 			if (ktime_compare(now, expire) >= 0)
894*4882a593Smuzhiyun 				/* Nothing should be waiting on the timeout */
895*4882a593Smuzhiyun 				kvm_mips_callbacks->queue_timer_int(vcpu);
896*4882a593Smuzhiyun 
897*4882a593Smuzhiyun 			/* Resume hrtimer without changing bias */
898*4882a593Smuzhiyun 			count = kvm_mips_read_count_running(vcpu, now);
899*4882a593Smuzhiyun 			kvm_mips_resume_hrtimer(vcpu, now, count);
900*4882a593Smuzhiyun 		}
901*4882a593Smuzhiyun 	}
902*4882a593Smuzhiyun 
903*4882a593Smuzhiyun 	return 0;
904*4882a593Smuzhiyun }
905*4882a593Smuzhiyun 
906*4882a593Smuzhiyun /**
907*4882a593Smuzhiyun  * kvm_mips_set_count_resume() - Update the count resume KVM register.
908*4882a593Smuzhiyun  * @vcpu:		Virtual CPU.
909*4882a593Smuzhiyun  * @count_resume:	Count resume register new value.
910*4882a593Smuzhiyun  *
911*4882a593Smuzhiyun  * Set the count resume KVM register.
912*4882a593Smuzhiyun  *
913*4882a593Smuzhiyun  * Returns:	-EINVAL if out of valid range (0..now).
914*4882a593Smuzhiyun  *		0 on success.
915*4882a593Smuzhiyun  */
916*4882a593Smuzhiyun int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
917*4882a593Smuzhiyun {
918*4882a593Smuzhiyun 	/*
919*4882a593Smuzhiyun 	 * It doesn't make sense for the resume time to be in the future, as it
920*4882a593Smuzhiyun 	 * would be possible for the next interrupt to be more than a full
921*4882a593Smuzhiyun 	 * period in the future.
922*4882a593Smuzhiyun 	 */
923*4882a593Smuzhiyun 	if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
924*4882a593Smuzhiyun 		return -EINVAL;
925*4882a593Smuzhiyun 
926*4882a593Smuzhiyun 	vcpu->arch.count_resume = ns_to_ktime(count_resume);
927*4882a593Smuzhiyun 	return 0;
928*4882a593Smuzhiyun }
929*4882a593Smuzhiyun 
930*4882a593Smuzhiyun /**
931*4882a593Smuzhiyun  * kvm_mips_count_timeout() - Push timer forward on timeout.
932*4882a593Smuzhiyun  * @vcpu:	Virtual CPU.
933*4882a593Smuzhiyun  *
934*4882a593Smuzhiyun  * Handle an hrtimer event by pushing the hrtimer forward by one period.
935*4882a593Smuzhiyun  *
936*4882a593Smuzhiyun  * Returns:	The hrtimer_restart value to return to the hrtimer subsystem.
937*4882a593Smuzhiyun  */
938*4882a593Smuzhiyun enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
939*4882a593Smuzhiyun {
940*4882a593Smuzhiyun 	/* Add the Count period to the current expiry time */
941*4882a593Smuzhiyun 	hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
942*4882a593Smuzhiyun 			       vcpu->arch.count_period);
943*4882a593Smuzhiyun 	return HRTIMER_RESTART;
944*4882a593Smuzhiyun }
945*4882a593Smuzhiyun 
946*4882a593Smuzhiyun enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
947*4882a593Smuzhiyun {
948*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
949*4882a593Smuzhiyun 	enum emulation_result er = EMULATE_DONE;
950*4882a593Smuzhiyun 
951*4882a593Smuzhiyun 	if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
952*4882a593Smuzhiyun 		kvm_clear_c0_guest_status(cop0, ST0_ERL);
953*4882a593Smuzhiyun 		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
954*4882a593Smuzhiyun 	} else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
955*4882a593Smuzhiyun 		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
956*4882a593Smuzhiyun 			  kvm_read_c0_guest_epc(cop0));
957*4882a593Smuzhiyun 		kvm_clear_c0_guest_status(cop0, ST0_EXL);
958*4882a593Smuzhiyun 		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
959*4882a593Smuzhiyun 
960*4882a593Smuzhiyun 	} else {
961*4882a593Smuzhiyun 		kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
962*4882a593Smuzhiyun 			vcpu->arch.pc);
963*4882a593Smuzhiyun 		er = EMULATE_FAIL;
964*4882a593Smuzhiyun 	}
965*4882a593Smuzhiyun 
966*4882a593Smuzhiyun 	return er;
967*4882a593Smuzhiyun }
968*4882a593Smuzhiyun 
969*4882a593Smuzhiyun enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
970*4882a593Smuzhiyun {
971*4882a593Smuzhiyun 	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
972*4882a593Smuzhiyun 		  vcpu->arch.pending_exceptions);
973*4882a593Smuzhiyun 
974*4882a593Smuzhiyun 	++vcpu->stat.wait_exits;
975*4882a593Smuzhiyun 	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
976*4882a593Smuzhiyun 	if (!vcpu->arch.pending_exceptions) {
977*4882a593Smuzhiyun 		kvm_vz_lose_htimer(vcpu);
978*4882a593Smuzhiyun 		vcpu->arch.wait = 1;
979*4882a593Smuzhiyun 		kvm_vcpu_block(vcpu);
980*4882a593Smuzhiyun 
981*4882a593Smuzhiyun 		/*
982*4882a593Smuzhiyun 		 * If we are runnable, then definitely go off to user space to
983*4882a593Smuzhiyun 		 * check if any I/O interrupts are pending.
984*4882a593Smuzhiyun 		 */
985*4882a593Smuzhiyun 		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
986*4882a593Smuzhiyun 			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
987*4882a593Smuzhiyun 			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
988*4882a593Smuzhiyun 		}
989*4882a593Smuzhiyun 	}
990*4882a593Smuzhiyun 
991*4882a593Smuzhiyun 	return EMULATE_DONE;
992*4882a593Smuzhiyun }
993*4882a593Smuzhiyun 
994*4882a593Smuzhiyun static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu,
995*4882a593Smuzhiyun 				    unsigned long entryhi)
996*4882a593Smuzhiyun {
997*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
998*4882a593Smuzhiyun 	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
999*4882a593Smuzhiyun 	int cpu, i;
1000*4882a593Smuzhiyun 	u32 nasid = entryhi & KVM_ENTRYHI_ASID;
1001*4882a593Smuzhiyun 
1002*4882a593Smuzhiyun 	if (((kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID) != nasid)) {
1003*4882a593Smuzhiyun 		trace_kvm_asid_change(vcpu, kvm_read_c0_guest_entryhi(cop0) &
1004*4882a593Smuzhiyun 				      KVM_ENTRYHI_ASID, nasid);
1005*4882a593Smuzhiyun 
1006*4882a593Smuzhiyun 		/*
1007*4882a593Smuzhiyun 		 * Flush entries from the GVA page tables.
1008*4882a593Smuzhiyun 		 * Guest user page table will get flushed lazily on re-entry to
1009*4882a593Smuzhiyun 		 * guest user if the guest ASID actually changes.
1010*4882a593Smuzhiyun 		 */
1011*4882a593Smuzhiyun 		kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_KERN);
1012*4882a593Smuzhiyun 
1013*4882a593Smuzhiyun 		/*
1014*4882a593Smuzhiyun 		 * Regenerate/invalidate kernel MMU context.
1015*4882a593Smuzhiyun 		 * The user MMU context will be regenerated lazily on re-entry
1016*4882a593Smuzhiyun 		 * to guest user if the guest ASID actually changes.
1017*4882a593Smuzhiyun 		 */
1018*4882a593Smuzhiyun 		preempt_disable();
1019*4882a593Smuzhiyun 		cpu = smp_processor_id();
1020*4882a593Smuzhiyun 		get_new_mmu_context(kern_mm);
1021*4882a593Smuzhiyun 		for_each_possible_cpu(i)
1022*4882a593Smuzhiyun 			if (i != cpu)
1023*4882a593Smuzhiyun 				set_cpu_context(i, kern_mm, 0);
1024*4882a593Smuzhiyun 		preempt_enable();
1025*4882a593Smuzhiyun 	}
1026*4882a593Smuzhiyun 	kvm_write_c0_guest_entryhi(cop0, entryhi);
1027*4882a593Smuzhiyun }
1028*4882a593Smuzhiyun 
1029*4882a593Smuzhiyun enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
1030*4882a593Smuzhiyun {
1031*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
1032*4882a593Smuzhiyun 	struct kvm_mips_tlb *tlb;
1033*4882a593Smuzhiyun 	unsigned long pc = vcpu->arch.pc;
1034*4882a593Smuzhiyun 	int index;
1035*4882a593Smuzhiyun 
1036*4882a593Smuzhiyun 	index = kvm_read_c0_guest_index(cop0);
1037*4882a593Smuzhiyun 	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
1038*4882a593Smuzhiyun 		/* UNDEFINED */
1039*4882a593Smuzhiyun 		kvm_debug("[%#lx] TLBR Index %#x out of range\n", pc, index);
1040*4882a593Smuzhiyun 		index &= KVM_MIPS_GUEST_TLB_SIZE - 1;
1041*4882a593Smuzhiyun 	}
1042*4882a593Smuzhiyun 
1043*4882a593Smuzhiyun 	tlb = &vcpu->arch.guest_tlb[index];
1044*4882a593Smuzhiyun 	kvm_write_c0_guest_pagemask(cop0, tlb->tlb_mask);
1045*4882a593Smuzhiyun 	kvm_write_c0_guest_entrylo0(cop0, tlb->tlb_lo[0]);
1046*4882a593Smuzhiyun 	kvm_write_c0_guest_entrylo1(cop0, tlb->tlb_lo[1]);
1047*4882a593Smuzhiyun 	kvm_mips_change_entryhi(vcpu, tlb->tlb_hi);
1048*4882a593Smuzhiyun 
1049*4882a593Smuzhiyun 	return EMULATE_DONE;
1050*4882a593Smuzhiyun }
1051*4882a593Smuzhiyun 
1052*4882a593Smuzhiyun /**
1053*4882a593Smuzhiyun  * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
1054*4882a593Smuzhiyun  * @vcpu:	VCPU with changed mappings.
1055*4882a593Smuzhiyun  * @tlb:	TLB entry being removed.
1056*4882a593Smuzhiyun  *
1057*4882a593Smuzhiyun  * This is called to indicate a single change in guest MMU mappings, so that we
1058*4882a593Smuzhiyun  * can arrange TLB flushes on this and other CPUs.
1059*4882a593Smuzhiyun  */
1060*4882a593Smuzhiyun static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
1061*4882a593Smuzhiyun 					  struct kvm_mips_tlb *tlb)
1062*4882a593Smuzhiyun {
1063*4882a593Smuzhiyun 	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
1064*4882a593Smuzhiyun 	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
1065*4882a593Smuzhiyun 	int cpu, i;
1066*4882a593Smuzhiyun 	bool user;
1067*4882a593Smuzhiyun 
1068*4882a593Smuzhiyun 	/* No need to flush for entries which are already invalid */
1069*4882a593Smuzhiyun 	if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
1070*4882a593Smuzhiyun 		return;
1071*4882a593Smuzhiyun 	/* Don't touch host kernel page tables or TLB mappings */
1072*4882a593Smuzhiyun 	if ((unsigned long)tlb->tlb_hi > 0x7fffffff)
1073*4882a593Smuzhiyun 		return;
1074*4882a593Smuzhiyun 	/* User address space doesn't need flushing for KSeg2/3 changes */
1075*4882a593Smuzhiyun 	user = tlb->tlb_hi < KVM_GUEST_KSEG0;
1076*4882a593Smuzhiyun 
1077*4882a593Smuzhiyun 	preempt_disable();
1078*4882a593Smuzhiyun 
1079*4882a593Smuzhiyun 	/* Invalidate page table entries */
1080*4882a593Smuzhiyun 	kvm_trap_emul_invalidate_gva(vcpu, tlb->tlb_hi & VPN2_MASK, user);
1081*4882a593Smuzhiyun 
1082*4882a593Smuzhiyun 	/*
1083*4882a593Smuzhiyun 	 * Probe the shadow host TLB for the entry being overwritten, if one
1084*4882a593Smuzhiyun 	 * matches, invalidate it
1085*4882a593Smuzhiyun 	 */
1086*4882a593Smuzhiyun 	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi, user, true);
1087*4882a593Smuzhiyun 
1088*4882a593Smuzhiyun 	/* Invalidate the whole ASID on other CPUs */
1089*4882a593Smuzhiyun 	cpu = smp_processor_id();
1090*4882a593Smuzhiyun 	for_each_possible_cpu(i) {
1091*4882a593Smuzhiyun 		if (i == cpu)
1092*4882a593Smuzhiyun 			continue;
1093*4882a593Smuzhiyun 		if (user)
1094*4882a593Smuzhiyun 			set_cpu_context(i, user_mm, 0);
1095*4882a593Smuzhiyun 		set_cpu_context(i, kern_mm, 0);
1096*4882a593Smuzhiyun 	}
1097*4882a593Smuzhiyun 
1098*4882a593Smuzhiyun 	preempt_enable();
1099*4882a593Smuzhiyun }
1100*4882a593Smuzhiyun 
1101*4882a593Smuzhiyun /* Write Guest TLB Entry @ Index */
1102*4882a593Smuzhiyun enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
1103*4882a593Smuzhiyun {
1104*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
1105*4882a593Smuzhiyun 	int index = kvm_read_c0_guest_index(cop0);
1106*4882a593Smuzhiyun 	struct kvm_mips_tlb *tlb = NULL;
1107*4882a593Smuzhiyun 	unsigned long pc = vcpu->arch.pc;
1108*4882a593Smuzhiyun 
1109*4882a593Smuzhiyun 	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
1110*4882a593Smuzhiyun 		kvm_debug("%s: illegal index: %d\n", __func__, index);
1111*4882a593Smuzhiyun 		kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
1112*4882a593Smuzhiyun 			  pc, index, kvm_read_c0_guest_entryhi(cop0),
1113*4882a593Smuzhiyun 			  kvm_read_c0_guest_entrylo0(cop0),
1114*4882a593Smuzhiyun 			  kvm_read_c0_guest_entrylo1(cop0),
1115*4882a593Smuzhiyun 			  kvm_read_c0_guest_pagemask(cop0));
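		/*
		 * Bit 31 of Index is the probe-failure (P) bit left behind by
		 * a missed TLBP; strip it and wrap the index into the guest
		 * TLB range instead of failing the emulation.
		 */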
1116*4882a593Smuzhiyun 		index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
1117*4882a593Smuzhiyun 	}
1118*4882a593Smuzhiyun 
1119*4882a593Smuzhiyun 	tlb = &vcpu->arch.guest_tlb[index];
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun 	kvm_mips_invalidate_guest_tlb(vcpu, tlb);
1122*4882a593Smuzhiyun 
1123*4882a593Smuzhiyun 	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
1124*4882a593Smuzhiyun 	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
1125*4882a593Smuzhiyun 	tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
1126*4882a593Smuzhiyun 	tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
1127*4882a593Smuzhiyun 
1128*4882a593Smuzhiyun 	kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
1129*4882a593Smuzhiyun 		  pc, index, kvm_read_c0_guest_entryhi(cop0),
1130*4882a593Smuzhiyun 		  kvm_read_c0_guest_entrylo0(cop0),
1131*4882a593Smuzhiyun 		  kvm_read_c0_guest_entrylo1(cop0),
1132*4882a593Smuzhiyun 		  kvm_read_c0_guest_pagemask(cop0));
1133*4882a593Smuzhiyun 
1134*4882a593Smuzhiyun 	return EMULATE_DONE;
1135*4882a593Smuzhiyun }
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun /* Write Guest TLB Entry @ Random Index */
1138*4882a593Smuzhiyun enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
1139*4882a593Smuzhiyun {
1140*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
1141*4882a593Smuzhiyun 	struct kvm_mips_tlb *tlb = NULL;
1142*4882a593Smuzhiyun 	unsigned long pc = vcpu->arch.pc;
1143*4882a593Smuzhiyun 	int index;
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun 	index = prandom_u32_max(KVM_MIPS_GUEST_TLB_SIZE);
1146*4882a593Smuzhiyun 	tlb = &vcpu->arch.guest_tlb[index];
1147*4882a593Smuzhiyun 
1148*4882a593Smuzhiyun 	kvm_mips_invalidate_guest_tlb(vcpu, tlb);
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun 	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
1151*4882a593Smuzhiyun 	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
1152*4882a593Smuzhiyun 	tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
1153*4882a593Smuzhiyun 	tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
1154*4882a593Smuzhiyun 
1155*4882a593Smuzhiyun 	kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
1156*4882a593Smuzhiyun 		  pc, index, kvm_read_c0_guest_entryhi(cop0),
1157*4882a593Smuzhiyun 		  kvm_read_c0_guest_entrylo0(cop0),
1158*4882a593Smuzhiyun 		  kvm_read_c0_guest_entrylo1(cop0));
1159*4882a593Smuzhiyun 
1160*4882a593Smuzhiyun 	return EMULATE_DONE;
1161*4882a593Smuzhiyun }
1162*4882a593Smuzhiyun 
1163*4882a593Smuzhiyun enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
1164*4882a593Smuzhiyun {
1165*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
1166*4882a593Smuzhiyun 	long entryhi = kvm_read_c0_guest_entryhi(cop0);
1167*4882a593Smuzhiyun 	unsigned long pc = vcpu->arch.pc;
1168*4882a593Smuzhiyun 	int index = -1;
1169*4882a593Smuzhiyun 
1170*4882a593Smuzhiyun 	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
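	/*
	 * A miss returns -1, which also sets bit 31 when written back to the
	 * guest Index register - exactly the P (probe failure) bit the guest
	 * expects after an unsuccessful TLBP.
	 */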
1171*4882a593Smuzhiyun 
1172*4882a593Smuzhiyun 	kvm_write_c0_guest_index(cop0, index);
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun 	kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
1175*4882a593Smuzhiyun 		  index);
1176*4882a593Smuzhiyun 
1177*4882a593Smuzhiyun 	return EMULATE_DONE;
1178*4882a593Smuzhiyun }
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun /**
1181*4882a593Smuzhiyun  * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
1182*4882a593Smuzhiyun  * @vcpu:	Virtual CPU.
1183*4882a593Smuzhiyun  *
1184*4882a593Smuzhiyun  * Finds the mask of bits which are writable in the guest's Config1 CP0
1185*4882a593Smuzhiyun  * register, by userland (currently read-only to the guest).
1186*4882a593Smuzhiyun  */
1187*4882a593Smuzhiyun unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
1188*4882a593Smuzhiyun {
1189*4882a593Smuzhiyun 	unsigned int mask = 0;
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun 	/* Permit FPU to be present if FPU is supported */
1192*4882a593Smuzhiyun 	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
1193*4882a593Smuzhiyun 		mask |= MIPS_CONF1_FP;
1194*4882a593Smuzhiyun 
1195*4882a593Smuzhiyun 	return mask;
1196*4882a593Smuzhiyun }
1197*4882a593Smuzhiyun 
1198*4882a593Smuzhiyun /**
1199*4882a593Smuzhiyun  * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
1200*4882a593Smuzhiyun  * @vcpu:	Virtual CPU.
1201*4882a593Smuzhiyun  *
1202*4882a593Smuzhiyun  * Finds the mask of bits which are writable in the guest's Config3 CP0
1203*4882a593Smuzhiyun  * register, by userland (currently read-only to the guest).
1204*4882a593Smuzhiyun  */
1205*4882a593Smuzhiyun unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
1206*4882a593Smuzhiyun {
1207*4882a593Smuzhiyun 	/* Config4 and ULRI are optional */
1208*4882a593Smuzhiyun 	unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI;
1209*4882a593Smuzhiyun 
1210*4882a593Smuzhiyun 	/* Permit MSA to be present if MSA is supported */
1211*4882a593Smuzhiyun 	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
1212*4882a593Smuzhiyun 		mask |= MIPS_CONF3_MSA;
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun 	return mask;
1215*4882a593Smuzhiyun }
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun /**
1218*4882a593Smuzhiyun  * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
1219*4882a593Smuzhiyun  * @vcpu:	Virtual CPU.
1220*4882a593Smuzhiyun  *
1221*4882a593Smuzhiyun  * Finds the mask of bits which are writable in the guest's Config4 CP0
1222*4882a593Smuzhiyun  * register, by userland (currently read-only to the guest).
1223*4882a593Smuzhiyun  */
1224*4882a593Smuzhiyun unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
1225*4882a593Smuzhiyun {
1226*4882a593Smuzhiyun 	/* Config5 is optional */
1227*4882a593Smuzhiyun 	unsigned int mask = MIPS_CONF_M;
1228*4882a593Smuzhiyun 
1229*4882a593Smuzhiyun 	/* KScrExist */
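	/* 0xfc advertises the KScratch registers at CP0 $31 selects 2..7 */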
1230*4882a593Smuzhiyun 	mask |= 0xfc << MIPS_CONF4_KSCREXIST_SHIFT;
1231*4882a593Smuzhiyun 
1232*4882a593Smuzhiyun 	return mask;
1233*4882a593Smuzhiyun }
1234*4882a593Smuzhiyun 
1235*4882a593Smuzhiyun /**
1236*4882a593Smuzhiyun  * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
1237*4882a593Smuzhiyun  * @vcpu:	Virtual CPU.
1238*4882a593Smuzhiyun  *
1239*4882a593Smuzhiyun  * Finds the mask of bits which are writable in the guest's Config5 CP0
1240*4882a593Smuzhiyun  * register, by the guest itself.
1241*4882a593Smuzhiyun  */
1242*4882a593Smuzhiyun unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
1243*4882a593Smuzhiyun {
1244*4882a593Smuzhiyun 	unsigned int mask = 0;
1245*4882a593Smuzhiyun 
1246*4882a593Smuzhiyun 	/* Permit MSAEn changes if MSA supported and enabled */
1247*4882a593Smuzhiyun 	if (kvm_mips_guest_has_msa(&vcpu->arch))
1248*4882a593Smuzhiyun 		mask |= MIPS_CONF5_MSAEN;
1249*4882a593Smuzhiyun 
1250*4882a593Smuzhiyun 	/*
1251*4882a593Smuzhiyun 	 * Permit guest FPU mode changes if FPU is enabled and the relevant
1252*4882a593Smuzhiyun 	 * feature exists according to FIR register.
1253*4882a593Smuzhiyun 	 */
1254*4882a593Smuzhiyun 	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
1255*4882a593Smuzhiyun 		if (cpu_has_fre)
1256*4882a593Smuzhiyun 			mask |= MIPS_CONF5_FRE;
1257*4882a593Smuzhiyun 		/* We don't support UFR or UFE */
1258*4882a593Smuzhiyun 	}
1259*4882a593Smuzhiyun 
1260*4882a593Smuzhiyun 	return mask;
1261*4882a593Smuzhiyun }
1262*4882a593Smuzhiyun 
1263*4882a593Smuzhiyun enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
1264*4882a593Smuzhiyun 					   u32 *opc, u32 cause,
1265*4882a593Smuzhiyun 					   struct kvm_vcpu *vcpu)
1266*4882a593Smuzhiyun {
1267*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
1268*4882a593Smuzhiyun 	enum emulation_result er = EMULATE_DONE;
1269*4882a593Smuzhiyun 	u32 rt, rd, sel;
1270*4882a593Smuzhiyun 	unsigned long curr_pc;
1271*4882a593Smuzhiyun 
1272*4882a593Smuzhiyun 	/*
1273*4882a593Smuzhiyun 	 * Update PC and hold onto current PC in case there is
1274*4882a593Smuzhiyun 	 * an error and we want to rollback the PC
1275*4882a593Smuzhiyun 	 */
1276*4882a593Smuzhiyun 	curr_pc = vcpu->arch.pc;
1277*4882a593Smuzhiyun 	er = update_pc(vcpu, cause);
1278*4882a593Smuzhiyun 	if (er == EMULATE_FAIL)
1279*4882a593Smuzhiyun 		return er;
1280*4882a593Smuzhiyun 
1281*4882a593Smuzhiyun 	if (inst.co_format.co) {
1282*4882a593Smuzhiyun 		switch (inst.co_format.func) {
1283*4882a593Smuzhiyun 		case tlbr_op:	/*  Read indexed TLB entry  */
1284*4882a593Smuzhiyun 			er = kvm_mips_emul_tlbr(vcpu);
1285*4882a593Smuzhiyun 			break;
1286*4882a593Smuzhiyun 		case tlbwi_op:	/*  Write indexed  */
1287*4882a593Smuzhiyun 			er = kvm_mips_emul_tlbwi(vcpu);
1288*4882a593Smuzhiyun 			break;
1289*4882a593Smuzhiyun 		case tlbwr_op:	/*  Write random  */
1290*4882a593Smuzhiyun 			er = kvm_mips_emul_tlbwr(vcpu);
1291*4882a593Smuzhiyun 			break;
1292*4882a593Smuzhiyun 		case tlbp_op:	/* TLB Probe */
1293*4882a593Smuzhiyun 			er = kvm_mips_emul_tlbp(vcpu);
1294*4882a593Smuzhiyun 			break;
1295*4882a593Smuzhiyun 		case rfe_op:
1296*4882a593Smuzhiyun 			kvm_err("!!!COP0_RFE!!!\n");
1297*4882a593Smuzhiyun 			break;
1298*4882a593Smuzhiyun 		case eret_op:
1299*4882a593Smuzhiyun 			er = kvm_mips_emul_eret(vcpu);
1300*4882a593Smuzhiyun 			goto dont_update_pc;
1301*4882a593Smuzhiyun 		case wait_op:
1302*4882a593Smuzhiyun 			er = kvm_mips_emul_wait(vcpu);
1303*4882a593Smuzhiyun 			break;
1304*4882a593Smuzhiyun 		case hypcall_op:
1305*4882a593Smuzhiyun 			er = kvm_mips_emul_hypcall(vcpu, inst);
1306*4882a593Smuzhiyun 			break;
1307*4882a593Smuzhiyun 		}
1308*4882a593Smuzhiyun 	} else {
1309*4882a593Smuzhiyun 		rt = inst.c0r_format.rt;
1310*4882a593Smuzhiyun 		rd = inst.c0r_format.rd;
1311*4882a593Smuzhiyun 		sel = inst.c0r_format.sel;
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun 		switch (inst.c0r_format.rs) {
1314*4882a593Smuzhiyun 		case mfc_op:
1315*4882a593Smuzhiyun #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
1316*4882a593Smuzhiyun 			cop0->stat[rd][sel]++;
1317*4882a593Smuzhiyun #endif
1318*4882a593Smuzhiyun 			/* Get reg */
1319*4882a593Smuzhiyun 			if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
1320*4882a593Smuzhiyun 				vcpu->arch.gprs[rt] =
1321*4882a593Smuzhiyun 				    (s32)kvm_mips_read_count(vcpu);
1322*4882a593Smuzhiyun 			} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
1323*4882a593Smuzhiyun 				vcpu->arch.gprs[rt] = 0x0;
1324*4882a593Smuzhiyun #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1325*4882a593Smuzhiyun 				kvm_mips_trans_mfc0(inst, opc, vcpu);
1326*4882a593Smuzhiyun #endif
1327*4882a593Smuzhiyun 			} else {
1328*4882a593Smuzhiyun 				vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel];
1329*4882a593Smuzhiyun 
1330*4882a593Smuzhiyun #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1331*4882a593Smuzhiyun 				kvm_mips_trans_mfc0(inst, opc, vcpu);
1332*4882a593Smuzhiyun #endif
1333*4882a593Smuzhiyun 			}
1334*4882a593Smuzhiyun 
1335*4882a593Smuzhiyun 			trace_kvm_hwr(vcpu, KVM_TRACE_MFC0,
1336*4882a593Smuzhiyun 				      KVM_TRACE_COP0(rd, sel),
1337*4882a593Smuzhiyun 				      vcpu->arch.gprs[rt]);
1338*4882a593Smuzhiyun 			break;
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun 		case dmfc_op:
1341*4882a593Smuzhiyun 			vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
1342*4882a593Smuzhiyun 
1343*4882a593Smuzhiyun 			trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0,
1344*4882a593Smuzhiyun 				      KVM_TRACE_COP0(rd, sel),
1345*4882a593Smuzhiyun 				      vcpu->arch.gprs[rt]);
1346*4882a593Smuzhiyun 			break;
1347*4882a593Smuzhiyun 
1348*4882a593Smuzhiyun 		case mtc_op:
1349*4882a593Smuzhiyun #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
1350*4882a593Smuzhiyun 			cop0->stat[rd][sel]++;
1351*4882a593Smuzhiyun #endif
1352*4882a593Smuzhiyun 			trace_kvm_hwr(vcpu, KVM_TRACE_MTC0,
1353*4882a593Smuzhiyun 				      KVM_TRACE_COP0(rd, sel),
1354*4882a593Smuzhiyun 				      vcpu->arch.gprs[rt]);
1355*4882a593Smuzhiyun 
1356*4882a593Smuzhiyun 			if ((rd == MIPS_CP0_TLB_INDEX)
1357*4882a593Smuzhiyun 			    && (vcpu->arch.gprs[rt] >=
1358*4882a593Smuzhiyun 				KVM_MIPS_GUEST_TLB_SIZE)) {
1359*4882a593Smuzhiyun 				kvm_err("Invalid TLB Index: %ld",
1360*4882a593Smuzhiyun 					vcpu->arch.gprs[rt]);
1361*4882a593Smuzhiyun 				er = EMULATE_FAIL;
1362*4882a593Smuzhiyun 				break;
1363*4882a593Smuzhiyun 			}
1364*4882a593Smuzhiyun 			if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
1365*4882a593Smuzhiyun 				/*
1366*4882a593Smuzhiyun 				 * Preserve core number, and keep the exception
1367*4882a593Smuzhiyun 				 * base in guest KSeg0.
1368*4882a593Smuzhiyun 				 */
1369*4882a593Smuzhiyun 				kvm_change_c0_guest_ebase(cop0, 0x1ffff000,
1370*4882a593Smuzhiyun 							  vcpu->arch.gprs[rt]);
1371*4882a593Smuzhiyun 			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
1372*4882a593Smuzhiyun 				kvm_mips_change_entryhi(vcpu,
1373*4882a593Smuzhiyun 							vcpu->arch.gprs[rt]);
1374*4882a593Smuzhiyun 			}
1375*4882a593Smuzhiyun 			/* Are we writing to COUNT */
1376*4882a593Smuzhiyun 			else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
1377*4882a593Smuzhiyun 				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
1378*4882a593Smuzhiyun 				goto done;
1379*4882a593Smuzhiyun 			} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
1380*4882a593Smuzhiyun 				/* If we are writing to COMPARE */
1381*4882a593Smuzhiyun 				/* Clear pending timer interrupt, if any */
1382*4882a593Smuzhiyun 				kvm_mips_write_compare(vcpu,
1383*4882a593Smuzhiyun 						       vcpu->arch.gprs[rt],
1384*4882a593Smuzhiyun 						       true);
1385*4882a593Smuzhiyun 			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
1386*4882a593Smuzhiyun 				unsigned int old_val, val, change;
1387*4882a593Smuzhiyun 
1388*4882a593Smuzhiyun 				old_val = kvm_read_c0_guest_status(cop0);
1389*4882a593Smuzhiyun 				val = vcpu->arch.gprs[rt];
1390*4882a593Smuzhiyun 				change = val ^ old_val;
1391*4882a593Smuzhiyun 
1392*4882a593Smuzhiyun 				/* Make sure that the NMI bit is never set */
1393*4882a593Smuzhiyun 				val &= ~ST0_NMI;
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun 				/*
1396*4882a593Smuzhiyun 				 * Don't allow CU1 or FR to be set unless FPU
1397*4882a593Smuzhiyun 				 * capability enabled and exists in guest
1398*4882a593Smuzhiyun 				 * configuration.
1399*4882a593Smuzhiyun 				 */
1400*4882a593Smuzhiyun 				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
1401*4882a593Smuzhiyun 					val &= ~(ST0_CU1 | ST0_FR);
1402*4882a593Smuzhiyun 
1403*4882a593Smuzhiyun 				/*
1404*4882a593Smuzhiyun 				 * Also don't allow FR to be set if host doesn't
1405*4882a593Smuzhiyun 				 * support it.
1406*4882a593Smuzhiyun 				 */
1407*4882a593Smuzhiyun 				if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
1408*4882a593Smuzhiyun 					val &= ~ST0_FR;
1409*4882a593Smuzhiyun 
1410*4882a593Smuzhiyun 
1411*4882a593Smuzhiyun 				/* Handle changes in FPU mode */
1412*4882a593Smuzhiyun 				preempt_disable();
1413*4882a593Smuzhiyun 
1414*4882a593Smuzhiyun 				/*
1415*4882a593Smuzhiyun 				 * FPU and Vector register state is made
1416*4882a593Smuzhiyun 				 * UNPREDICTABLE by a change of FR, so don't
1417*4882a593Smuzhiyun 				 * even bother saving it.
1418*4882a593Smuzhiyun 				 */
1419*4882a593Smuzhiyun 				if (change & ST0_FR)
1420*4882a593Smuzhiyun 					kvm_drop_fpu(vcpu);
1421*4882a593Smuzhiyun 
1422*4882a593Smuzhiyun 				/*
1423*4882a593Smuzhiyun 				 * If MSA state is already live, it is undefined
1424*4882a593Smuzhiyun 				 * how it interacts with FR=0 FPU state, and we
1425*4882a593Smuzhiyun 				 * don't want to hit reserved instruction
1426*4882a593Smuzhiyun 				 * exceptions trying to save the MSA state later
1427*4882a593Smuzhiyun 				 * when CU=1 && FR=1, so play it safe and save
1428*4882a593Smuzhiyun 				 * it first.
1429*4882a593Smuzhiyun 				 */
1430*4882a593Smuzhiyun 				if (change & ST0_CU1 && !(val & ST0_FR) &&
1431*4882a593Smuzhiyun 				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
1432*4882a593Smuzhiyun 					kvm_lose_fpu(vcpu);
1433*4882a593Smuzhiyun 
1434*4882a593Smuzhiyun 				/*
1435*4882a593Smuzhiyun 				 * Propagate CU1 (FPU enable) changes
1436*4882a593Smuzhiyun 				 * immediately if the FPU context is already
1437*4882a593Smuzhiyun 				 * loaded. When disabling we leave the context
1438*4882a593Smuzhiyun 				 * loaded so it can be quickly enabled again in
1439*4882a593Smuzhiyun 				 * the near future.
1440*4882a593Smuzhiyun 				 */
1441*4882a593Smuzhiyun 				if (change & ST0_CU1 &&
1442*4882a593Smuzhiyun 				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
1443*4882a593Smuzhiyun 					change_c0_status(ST0_CU1, val);
1444*4882a593Smuzhiyun 
1445*4882a593Smuzhiyun 				preempt_enable();
1446*4882a593Smuzhiyun 
1447*4882a593Smuzhiyun 				kvm_write_c0_guest_status(cop0, val);
1448*4882a593Smuzhiyun 
1449*4882a593Smuzhiyun #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1450*4882a593Smuzhiyun 				/*
1451*4882a593Smuzhiyun 				 * If FPU present, we need CU1/FR bits to take
1452*4882a593Smuzhiyun 				 * effect fairly soon.
1453*4882a593Smuzhiyun 				 */
1454*4882a593Smuzhiyun 				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
1455*4882a593Smuzhiyun 					kvm_mips_trans_mtc0(inst, opc, vcpu);
1456*4882a593Smuzhiyun #endif
1457*4882a593Smuzhiyun 			} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
1458*4882a593Smuzhiyun 				unsigned int old_val, val, change, wrmask;
1459*4882a593Smuzhiyun 
1460*4882a593Smuzhiyun 				old_val = kvm_read_c0_guest_config5(cop0);
1461*4882a593Smuzhiyun 				val = vcpu->arch.gprs[rt];
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun 				/* Only a few bits are writable in Config5 */
1464*4882a593Smuzhiyun 				wrmask = kvm_mips_config5_wrmask(vcpu);
1465*4882a593Smuzhiyun 				change = (val ^ old_val) & wrmask;
1466*4882a593Smuzhiyun 				val = old_val ^ change;
1467*4882a593Smuzhiyun 
1468*4882a593Smuzhiyun 
1469*4882a593Smuzhiyun 				/* Handle changes in FPU/MSA modes */
1470*4882a593Smuzhiyun 				preempt_disable();
1471*4882a593Smuzhiyun 
1472*4882a593Smuzhiyun 				/*
1473*4882a593Smuzhiyun 				 * Propagate FRE changes immediately if the FPU
1474*4882a593Smuzhiyun 				 * context is already loaded.
1475*4882a593Smuzhiyun 				 */
1476*4882a593Smuzhiyun 				if (change & MIPS_CONF5_FRE &&
1477*4882a593Smuzhiyun 				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
1478*4882a593Smuzhiyun 					change_c0_config5(MIPS_CONF5_FRE, val);
1479*4882a593Smuzhiyun 
1480*4882a593Smuzhiyun 				/*
1481*4882a593Smuzhiyun 				 * Propagate MSAEn changes immediately if the
1482*4882a593Smuzhiyun 				 * MSA context is already loaded. When disabling
1483*4882a593Smuzhiyun 				 * we leave the context loaded so it can be
1484*4882a593Smuzhiyun 				 * quickly enabled again in the near future.
1485*4882a593Smuzhiyun 				 */
1486*4882a593Smuzhiyun 				if (change & MIPS_CONF5_MSAEN &&
1487*4882a593Smuzhiyun 				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
1488*4882a593Smuzhiyun 					change_c0_config5(MIPS_CONF5_MSAEN,
1489*4882a593Smuzhiyun 							  val);
1490*4882a593Smuzhiyun 
1491*4882a593Smuzhiyun 				preempt_enable();
1492*4882a593Smuzhiyun 
1493*4882a593Smuzhiyun 				kvm_write_c0_guest_config5(cop0, val);
1494*4882a593Smuzhiyun 			} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
1495*4882a593Smuzhiyun 				u32 old_cause, new_cause;
1496*4882a593Smuzhiyun 
1497*4882a593Smuzhiyun 				old_cause = kvm_read_c0_guest_cause(cop0);
1498*4882a593Smuzhiyun 				new_cause = vcpu->arch.gprs[rt];
1499*4882a593Smuzhiyun 				/* Update R/W bits */
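				/*
				 * 0x08800300 = DC | IV | IP1 | IP0, the only
				 * Cause bits the guest may write directly.
				 */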
1500*4882a593Smuzhiyun 				kvm_change_c0_guest_cause(cop0, 0x08800300,
1501*4882a593Smuzhiyun 							  new_cause);
1502*4882a593Smuzhiyun 				/* DC bit enabling/disabling timer? */
1503*4882a593Smuzhiyun 				if ((old_cause ^ new_cause) & CAUSEF_DC) {
1504*4882a593Smuzhiyun 					if (new_cause & CAUSEF_DC)
1505*4882a593Smuzhiyun 						kvm_mips_count_disable_cause(vcpu);
1506*4882a593Smuzhiyun 					else
1507*4882a593Smuzhiyun 						kvm_mips_count_enable_cause(vcpu);
1508*4882a593Smuzhiyun 				}
1509*4882a593Smuzhiyun 			} else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) {
1510*4882a593Smuzhiyun 				u32 mask = MIPS_HWRENA_CPUNUM |
1511*4882a593Smuzhiyun 					   MIPS_HWRENA_SYNCISTEP |
1512*4882a593Smuzhiyun 					   MIPS_HWRENA_CC |
1513*4882a593Smuzhiyun 					   MIPS_HWRENA_CCRES;
1514*4882a593Smuzhiyun 
1515*4882a593Smuzhiyun 				if (kvm_read_c0_guest_config3(cop0) &
1516*4882a593Smuzhiyun 				    MIPS_CONF3_ULRI)
1517*4882a593Smuzhiyun 					mask |= MIPS_HWRENA_ULR;
1518*4882a593Smuzhiyun 				cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask;
1519*4882a593Smuzhiyun 			} else {
1520*4882a593Smuzhiyun 				cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
1521*4882a593Smuzhiyun #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1522*4882a593Smuzhiyun 				kvm_mips_trans_mtc0(inst, opc, vcpu);
1523*4882a593Smuzhiyun #endif
1524*4882a593Smuzhiyun 			}
1525*4882a593Smuzhiyun 			break;
1526*4882a593Smuzhiyun 
1527*4882a593Smuzhiyun 		case dmtc_op:
1528*4882a593Smuzhiyun 			kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
1529*4882a593Smuzhiyun 				vcpu->arch.pc, rt, rd, sel);
1530*4882a593Smuzhiyun 			trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0,
1531*4882a593Smuzhiyun 				      KVM_TRACE_COP0(rd, sel),
1532*4882a593Smuzhiyun 				      vcpu->arch.gprs[rt]);
1533*4882a593Smuzhiyun 			er = EMULATE_FAIL;
1534*4882a593Smuzhiyun 			break;
1535*4882a593Smuzhiyun 
1536*4882a593Smuzhiyun 		case mfmc0_op:
1537*4882a593Smuzhiyun #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
1538*4882a593Smuzhiyun 			cop0->stat[MIPS_CP0_STATUS][0]++;
1539*4882a593Smuzhiyun #endif
1540*4882a593Smuzhiyun 			if (rt != 0)
1541*4882a593Smuzhiyun 				vcpu->arch.gprs[rt] =
1542*4882a593Smuzhiyun 				    kvm_read_c0_guest_status(cop0);
1543*4882a593Smuzhiyun 			/* EI/DI: set or clear the guest Status.IE bit */
1544*4882a593Smuzhiyun 			if (inst.mfmc0_format.sc) {
1545*4882a593Smuzhiyun 				kvm_debug("[%#lx] mfmc0_op: EI\n",
1546*4882a593Smuzhiyun 					  vcpu->arch.pc);
1547*4882a593Smuzhiyun 				kvm_set_c0_guest_status(cop0, ST0_IE);
1548*4882a593Smuzhiyun 			} else {
1549*4882a593Smuzhiyun 				kvm_debug("[%#lx] mfmc0_op: DI\n",
1550*4882a593Smuzhiyun 					  vcpu->arch.pc);
1551*4882a593Smuzhiyun 				kvm_clear_c0_guest_status(cop0, ST0_IE);
1552*4882a593Smuzhiyun 			}
1553*4882a593Smuzhiyun 
1554*4882a593Smuzhiyun 			break;
1555*4882a593Smuzhiyun 
1556*4882a593Smuzhiyun 		case wrpgpr_op:
1557*4882a593Smuzhiyun 			{
1558*4882a593Smuzhiyun 				u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
1559*4882a593Smuzhiyun 				u32 pss =
1560*4882a593Smuzhiyun 				    (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
1561*4882a593Smuzhiyun 				/*
1562*4882a593Smuzhiyun 				 * We don't support any shadow register sets, so
1563*4882a593Smuzhiyun 				 * SRSCtl[PSS] == SRSCtl[CSS] = 0
1564*4882a593Smuzhiyun 				 */
1565*4882a593Smuzhiyun 				if (css || pss) {
1566*4882a593Smuzhiyun 					er = EMULATE_FAIL;
1567*4882a593Smuzhiyun 					break;
1568*4882a593Smuzhiyun 				}
1569*4882a593Smuzhiyun 				kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
1570*4882a593Smuzhiyun 					  vcpu->arch.gprs[rt]);
1571*4882a593Smuzhiyun 				vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
1572*4882a593Smuzhiyun 			}
1573*4882a593Smuzhiyun 			break;
1574*4882a593Smuzhiyun 		default:
1575*4882a593Smuzhiyun 			kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
1576*4882a593Smuzhiyun 				vcpu->arch.pc, inst.c0r_format.rs);
1577*4882a593Smuzhiyun 			er = EMULATE_FAIL;
1578*4882a593Smuzhiyun 			break;
1579*4882a593Smuzhiyun 		}
1580*4882a593Smuzhiyun 	}
1581*4882a593Smuzhiyun 
1582*4882a593Smuzhiyun done:
1583*4882a593Smuzhiyun 	/* Rollback PC only if emulation was unsuccessful */
1584*4882a593Smuzhiyun 	if (er == EMULATE_FAIL)
1585*4882a593Smuzhiyun 		vcpu->arch.pc = curr_pc;
1586*4882a593Smuzhiyun 
1587*4882a593Smuzhiyun dont_update_pc:
1588*4882a593Smuzhiyun 	/*
1589*4882a593Smuzhiyun 	 * This is for special instructions whose emulation
1590*4882a593Smuzhiyun 	 * updates the PC, so do not overwrite the PC under
1591*4882a593Smuzhiyun 	 * any circumstances
1592*4882a593Smuzhiyun 	 */
1593*4882a593Smuzhiyun 
1594*4882a593Smuzhiyun 	return er;
1595*4882a593Smuzhiyun }
1596*4882a593Smuzhiyun 
1597*4882a593Smuzhiyun enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
1598*4882a593Smuzhiyun 					     u32 cause,
1599*4882a593Smuzhiyun 					     struct kvm_vcpu *vcpu)
1600*4882a593Smuzhiyun {
1601*4882a593Smuzhiyun 	int r;
1602*4882a593Smuzhiyun 	enum emulation_result er;
1603*4882a593Smuzhiyun 	u32 rt;
1604*4882a593Smuzhiyun 	struct kvm_run *run = vcpu->run;
1605*4882a593Smuzhiyun 	void *data = run->mmio.data;
1606*4882a593Smuzhiyun 	unsigned int imme;
1607*4882a593Smuzhiyun 	unsigned long curr_pc;
1608*4882a593Smuzhiyun 
1609*4882a593Smuzhiyun 	/*
1610*4882a593Smuzhiyun 	 * Update PC and hold onto current PC in case there is
1611*4882a593Smuzhiyun 	 * an error and we want to rollback the PC
1612*4882a593Smuzhiyun 	 */
1613*4882a593Smuzhiyun 	curr_pc = vcpu->arch.pc;
1614*4882a593Smuzhiyun 	er = update_pc(vcpu, cause);
1615*4882a593Smuzhiyun 	if (er == EMULATE_FAIL)
1616*4882a593Smuzhiyun 		return er;
1617*4882a593Smuzhiyun 
1618*4882a593Smuzhiyun 	rt = inst.i_format.rt;
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun 	run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1621*4882a593Smuzhiyun 						vcpu->arch.host_cp0_badvaddr);
1622*4882a593Smuzhiyun 	if (run->mmio.phys_addr == KVM_INVALID_ADDR)
1623*4882a593Smuzhiyun 		goto out_fail;
1624*4882a593Smuzhiyun 
1625*4882a593Smuzhiyun 	switch (inst.i_format.opcode) {
1626*4882a593Smuzhiyun #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
1627*4882a593Smuzhiyun 	case sd_op:
1628*4882a593Smuzhiyun 		run->mmio.len = 8;
1629*4882a593Smuzhiyun 		*(u64 *)data = vcpu->arch.gprs[rt];
1630*4882a593Smuzhiyun 
1631*4882a593Smuzhiyun 		kvm_debug("[%#lx] OP_SD: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
1632*4882a593Smuzhiyun 			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1633*4882a593Smuzhiyun 			  vcpu->arch.gprs[rt], *(u64 *)data);
1634*4882a593Smuzhiyun 		break;
1635*4882a593Smuzhiyun #endif
1636*4882a593Smuzhiyun 
1637*4882a593Smuzhiyun 	case sw_op:
1638*4882a593Smuzhiyun 		run->mmio.len = 4;
1639*4882a593Smuzhiyun 		*(u32 *)data = vcpu->arch.gprs[rt];
1640*4882a593Smuzhiyun 
1641*4882a593Smuzhiyun 		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1642*4882a593Smuzhiyun 			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1643*4882a593Smuzhiyun 			  vcpu->arch.gprs[rt], *(u32 *)data);
1644*4882a593Smuzhiyun 		break;
1645*4882a593Smuzhiyun 
1646*4882a593Smuzhiyun 	case sh_op:
1647*4882a593Smuzhiyun 		run->mmio.len = 2;
1648*4882a593Smuzhiyun 		*(u16 *)data = vcpu->arch.gprs[rt];
1649*4882a593Smuzhiyun 
1650*4882a593Smuzhiyun 		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1651*4882a593Smuzhiyun 			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1652*4882a593Smuzhiyun 			  vcpu->arch.gprs[rt], *(u16 *)data);
1653*4882a593Smuzhiyun 		break;
1654*4882a593Smuzhiyun 
1655*4882a593Smuzhiyun 	case sb_op:
1656*4882a593Smuzhiyun 		run->mmio.len = 1;
1657*4882a593Smuzhiyun 		*(u8 *)data = vcpu->arch.gprs[rt];
1658*4882a593Smuzhiyun 
1659*4882a593Smuzhiyun 		kvm_debug("[%#lx] OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1660*4882a593Smuzhiyun 			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1661*4882a593Smuzhiyun 			  vcpu->arch.gprs[rt], *(u8 *)data);
1662*4882a593Smuzhiyun 		break;
1663*4882a593Smuzhiyun 
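	/*
	 * SWL/SWR are emulated as aligned 32-bit MMIO writes: the address is
	 * rounded down to a word boundary and the guest register is shifted
	 * into the byte lanes selected by the low two address bits.
	 */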
1664*4882a593Smuzhiyun 	case swl_op:
1665*4882a593Smuzhiyun 		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1666*4882a593Smuzhiyun 					vcpu->arch.host_cp0_badvaddr) & (~0x3);
1667*4882a593Smuzhiyun 		run->mmio.len = 4;
1668*4882a593Smuzhiyun 		imme = vcpu->arch.host_cp0_badvaddr & 0x3;
1669*4882a593Smuzhiyun 		switch (imme) {
1670*4882a593Smuzhiyun 		case 0:
1671*4882a593Smuzhiyun 			*(u32 *)data = ((*(u32 *)data) & 0xffffff00) |
1672*4882a593Smuzhiyun 					(vcpu->arch.gprs[rt] >> 24);
1673*4882a593Smuzhiyun 			break;
1674*4882a593Smuzhiyun 		case 1:
1675*4882a593Smuzhiyun 			*(u32 *)data = ((*(u32 *)data) & 0xffff0000) |
1676*4882a593Smuzhiyun 					(vcpu->arch.gprs[rt] >> 16);
1677*4882a593Smuzhiyun 			break;
1678*4882a593Smuzhiyun 		case 2:
1679*4882a593Smuzhiyun 			*(u32 *)data = ((*(u32 *)data) & 0xff000000) |
1680*4882a593Smuzhiyun 					(vcpu->arch.gprs[rt] >> 8);
1681*4882a593Smuzhiyun 			break;
1682*4882a593Smuzhiyun 		case 3:
1683*4882a593Smuzhiyun 			*(u32 *)data = vcpu->arch.gprs[rt];
1684*4882a593Smuzhiyun 			break;
1685*4882a593Smuzhiyun 		default:
1686*4882a593Smuzhiyun 			break;
1687*4882a593Smuzhiyun 		}
1688*4882a593Smuzhiyun 
1689*4882a593Smuzhiyun 		kvm_debug("[%#lx] OP_SWL: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1690*4882a593Smuzhiyun 			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1691*4882a593Smuzhiyun 			  vcpu->arch.gprs[rt], *(u32 *)data);
1692*4882a593Smuzhiyun 		break;
1693*4882a593Smuzhiyun 
1694*4882a593Smuzhiyun 	case swr_op:
1695*4882a593Smuzhiyun 		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1696*4882a593Smuzhiyun 					vcpu->arch.host_cp0_badvaddr) & (~0x3);
1697*4882a593Smuzhiyun 		run->mmio.len = 4;
1698*4882a593Smuzhiyun 		imme = vcpu->arch.host_cp0_badvaddr & 0x3;
1699*4882a593Smuzhiyun 		switch (imme) {
1700*4882a593Smuzhiyun 		case 0:
1701*4882a593Smuzhiyun 			*(u32 *)data = vcpu->arch.gprs[rt];
1702*4882a593Smuzhiyun 			break;
1703*4882a593Smuzhiyun 		case 1:
1704*4882a593Smuzhiyun 			*(u32 *)data = ((*(u32 *)data) & 0xff) |
1705*4882a593Smuzhiyun 					(vcpu->arch.gprs[rt] << 8);
1706*4882a593Smuzhiyun 			break;
1707*4882a593Smuzhiyun 		case 2:
1708*4882a593Smuzhiyun 			*(u32 *)data = ((*(u32 *)data) & 0xffff) |
1709*4882a593Smuzhiyun 					(vcpu->arch.gprs[rt] << 16);
1710*4882a593Smuzhiyun 			break;
1711*4882a593Smuzhiyun 		case 3:
1712*4882a593Smuzhiyun 			*(u32 *)data = ((*(u32 *)data) & 0xffffff) |
1713*4882a593Smuzhiyun 					(vcpu->arch.gprs[rt] << 24);
1714*4882a593Smuzhiyun 			break;
1715*4882a593Smuzhiyun 		default:
1716*4882a593Smuzhiyun 			break;
1717*4882a593Smuzhiyun 		}
1718*4882a593Smuzhiyun 
1719*4882a593Smuzhiyun 		kvm_debug("[%#lx] OP_SWR: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1720*4882a593Smuzhiyun 			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1721*4882a593Smuzhiyun 			  vcpu->arch.gprs[rt], *(u32 *)data);
1722*4882a593Smuzhiyun 		break;
1723*4882a593Smuzhiyun 
1724*4882a593Smuzhiyun #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
1725*4882a593Smuzhiyun 	case sdl_op:
1726*4882a593Smuzhiyun 		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1727*4882a593Smuzhiyun 					vcpu->arch.host_cp0_badvaddr) & (~0x7);
1728*4882a593Smuzhiyun 
1729*4882a593Smuzhiyun 		run->mmio.len = 8;
1730*4882a593Smuzhiyun 		imme = vcpu->arch.host_cp0_badvaddr & 0x7;
1731*4882a593Smuzhiyun 		switch (imme) {
1732*4882a593Smuzhiyun 		case 0:
1733*4882a593Smuzhiyun 			*(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff00) |
1734*4882a593Smuzhiyun 					((vcpu->arch.gprs[rt] >> 56) & 0xff);
1735*4882a593Smuzhiyun 			break;
1736*4882a593Smuzhiyun 		case 1:
1737*4882a593Smuzhiyun 			*(u64 *)data = ((*(u64 *)data) & 0xffffffffffff0000) |
1738*4882a593Smuzhiyun 					((vcpu->arch.gprs[rt] >> 48) & 0xffff);
1739*4882a593Smuzhiyun 			break;
1740*4882a593Smuzhiyun 		case 2:
1741*4882a593Smuzhiyun 			*(u64 *)data = ((*(u64 *)data) & 0xffffffffff000000) |
1742*4882a593Smuzhiyun 					((vcpu->arch.gprs[rt] >> 40) & 0xffffff);
1743*4882a593Smuzhiyun 			break;
1744*4882a593Smuzhiyun 		case 3:
1745*4882a593Smuzhiyun 			*(u64 *)data = ((*(u64 *)data) & 0xffffffff00000000) |
1746*4882a593Smuzhiyun 					((vcpu->arch.gprs[rt] >> 32) & 0xffffffff);
1747*4882a593Smuzhiyun 			break;
1748*4882a593Smuzhiyun 		case 4:
1749*4882a593Smuzhiyun 			*(u64 *)data = ((*(u64 *)data) & 0xffffff0000000000) |
1750*4882a593Smuzhiyun 					((vcpu->arch.gprs[rt] >> 24) & 0xffffffffff);
1751*4882a593Smuzhiyun 			break;
1752*4882a593Smuzhiyun 		case 5:
1753*4882a593Smuzhiyun 			*(u64 *)data = ((*(u64 *)data) & 0xffff000000000000) |
1754*4882a593Smuzhiyun 					((vcpu->arch.gprs[rt] >> 16) & 0xffffffffffff);
1755*4882a593Smuzhiyun 			break;
1756*4882a593Smuzhiyun 		case 6:
1757*4882a593Smuzhiyun 			*(u64 *)data = ((*(u64 *)data) & 0xff00000000000000) |
1758*4882a593Smuzhiyun 					((vcpu->arch.gprs[rt] >> 8) & 0xffffffffffffff);
1759*4882a593Smuzhiyun 			break;
1760*4882a593Smuzhiyun 		case 7:
1761*4882a593Smuzhiyun 			*(u64 *)data = vcpu->arch.gprs[rt];
1762*4882a593Smuzhiyun 			break;
1763*4882a593Smuzhiyun 		default:
1764*4882a593Smuzhiyun 			break;
1765*4882a593Smuzhiyun 		}
1766*4882a593Smuzhiyun 
1767*4882a593Smuzhiyun 		kvm_debug("[%#lx] OP_SDL: eaddr: %#lx, gpr: %#lx, data: %llx\n",
1768*4882a593Smuzhiyun 			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1769*4882a593Smuzhiyun 			  vcpu->arch.gprs[rt], *(u64 *)data);
1770*4882a593Smuzhiyun 		break;
1771*4882a593Smuzhiyun 
1772*4882a593Smuzhiyun 	case sdr_op:
1773*4882a593Smuzhiyun 		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1774*4882a593Smuzhiyun 					vcpu->arch.host_cp0_badvaddr) & (~0x7);
1775*4882a593Smuzhiyun 
1776*4882a593Smuzhiyun 		run->mmio.len = 8;
1777*4882a593Smuzhiyun 		imme = vcpu->arch.host_cp0_badvaddr & 0x7;
1778*4882a593Smuzhiyun 		switch (imme) {
1779*4882a593Smuzhiyun 		case 0:
1780*4882a593Smuzhiyun 			*(u64 *)data = vcpu->arch.gprs[rt];
1781*4882a593Smuzhiyun 			break;
1782*4882a593Smuzhiyun 		case 1:
1783*4882a593Smuzhiyun 			*(u64 *)data = ((*(u64 *)data) & 0xff) |
1784*4882a593Smuzhiyun 					(vcpu->arch.gprs[rt] << 8);
1785*4882a593Smuzhiyun 			break;
1786*4882a593Smuzhiyun 		case 2:
1787*4882a593Smuzhiyun 			*(u64 *)data = ((*(u64 *)data) & 0xffff) |
1788*4882a593Smuzhiyun 					(vcpu->arch.gprs[rt] << 16);
1789*4882a593Smuzhiyun 			break;
1790*4882a593Smuzhiyun 		case 3:
1791*4882a593Smuzhiyun 			*(u64 *)data = ((*(u64 *)data) & 0xffffff) |
1792*4882a593Smuzhiyun 					(vcpu->arch.gprs[rt] << 24);
1793*4882a593Smuzhiyun 			break;
1794*4882a593Smuzhiyun 		case 4:
1795*4882a593Smuzhiyun 			*(u64 *)data = ((*(u64 *)data) & 0xffffffff) |
1796*4882a593Smuzhiyun 					(vcpu->arch.gprs[rt] << 32);
1797*4882a593Smuzhiyun 			break;
1798*4882a593Smuzhiyun 		case 5:
1799*4882a593Smuzhiyun 			*(u64 *)data = ((*(u64 *)data) & 0xffffffffff) |
1800*4882a593Smuzhiyun 					(vcpu->arch.gprs[rt] << 40);
1801*4882a593Smuzhiyun 			break;
1802*4882a593Smuzhiyun 		case 6:
1803*4882a593Smuzhiyun 			*(u64 *)data = ((*(u64 *)data) & 0xffffffffffff) |
1804*4882a593Smuzhiyun 					(vcpu->arch.gprs[rt] << 48);
1805*4882a593Smuzhiyun 			break;
1806*4882a593Smuzhiyun 		case 7:
1807*4882a593Smuzhiyun 			*(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff) |
1808*4882a593Smuzhiyun 					(vcpu->arch.gprs[rt] << 56);
1809*4882a593Smuzhiyun 			break;
1810*4882a593Smuzhiyun 		default:
1811*4882a593Smuzhiyun 			break;
1812*4882a593Smuzhiyun 		}
1813*4882a593Smuzhiyun 
1814*4882a593Smuzhiyun 		kvm_debug("[%#lx] OP_SDR: eaddr: %#lx, gpr: %#lx, data: %llx\n",
1815*4882a593Smuzhiyun 			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1816*4882a593Smuzhiyun 			  vcpu->arch.gprs[rt], *(u64 *)data);
1817*4882a593Smuzhiyun 		break;
1818*4882a593Smuzhiyun #endif
1819*4882a593Smuzhiyun 
1820*4882a593Smuzhiyun #ifdef CONFIG_CPU_LOONGSON64
1821*4882a593Smuzhiyun 	case sdc2_op:
1822*4882a593Smuzhiyun 		rt = inst.loongson3_lsdc2_format.rt;
1823*4882a593Smuzhiyun 		switch (inst.loongson3_lsdc2_format.opcode1) {
1824*4882a593Smuzhiyun 		/*
1825*4882a593Smuzhiyun 		 * Loongson-3 overridden sdc2 instructions.
1826*4882a593Smuzhiyun 		 * opcode1              instruction
1827*4882a593Smuzhiyun 		 *   0x0          gssbx: store 1 byte from GPR
1828*4882a593Smuzhiyun 		 *   0x1          gsshx: store 2 bytes from GPR
1829*4882a593Smuzhiyun 		 *   0x2          gsswx: store 4 bytes from GPR
1830*4882a593Smuzhiyun 		 *   0x3          gssdx: store 8 bytes from GPR
1831*4882a593Smuzhiyun 		 */
1832*4882a593Smuzhiyun 		case 0x0:
1833*4882a593Smuzhiyun 			run->mmio.len = 1;
1834*4882a593Smuzhiyun 			*(u8 *)data = vcpu->arch.gprs[rt];
1835*4882a593Smuzhiyun 
1836*4882a593Smuzhiyun 			kvm_debug("[%#lx] OP_GSSBX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1837*4882a593Smuzhiyun 				  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1838*4882a593Smuzhiyun 				  vcpu->arch.gprs[rt], *(u8 *)data);
1839*4882a593Smuzhiyun 			break;
1840*4882a593Smuzhiyun 		case 0x1:
1841*4882a593Smuzhiyun 			run->mmio.len = 2;
1842*4882a593Smuzhiyun 			*(u16 *)data = vcpu->arch.gprs[rt];
1843*4882a593Smuzhiyun 
1844*4882a593Smuzhiyun 			kvm_debug("[%#lx] OP_GSSSHX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1845*4882a593Smuzhiyun 				  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1846*4882a593Smuzhiyun 				  vcpu->arch.gprs[rt], *(u16 *)data);
1847*4882a593Smuzhiyun 			break;
1848*4882a593Smuzhiyun 		case 0x2:
1849*4882a593Smuzhiyun 			run->mmio.len = 4;
1850*4882a593Smuzhiyun 			*(u32 *)data = vcpu->arch.gprs[rt];
1851*4882a593Smuzhiyun 
1852*4882a593Smuzhiyun 			kvm_debug("[%#lx] OP_GSSWX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1853*4882a593Smuzhiyun 				  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1854*4882a593Smuzhiyun 				  vcpu->arch.gprs[rt], *(u32 *)data);
1855*4882a593Smuzhiyun 			break;
1856*4882a593Smuzhiyun 		case 0x3:
1857*4882a593Smuzhiyun 			run->mmio.len = 8;
1858*4882a593Smuzhiyun 			*(u64 *)data = vcpu->arch.gprs[rt];
1859*4882a593Smuzhiyun 
1860*4882a593Smuzhiyun 			kvm_debug("[%#lx] OP_GSSDX: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
1861*4882a593Smuzhiyun 				  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1862*4882a593Smuzhiyun 				  vcpu->arch.gprs[rt], *(u64 *)data);
1863*4882a593Smuzhiyun 			break;
1864*4882a593Smuzhiyun 		default:
1865*4882a593Smuzhiyun 			kvm_err("Godson Extended GS-Store not yet supported (inst=0x%08x)\n",
1866*4882a593Smuzhiyun 				inst.word);
1867*4882a593Smuzhiyun 			break;
1868*4882a593Smuzhiyun 		}
1869*4882a593Smuzhiyun 		break;
1870*4882a593Smuzhiyun #endif
1871*4882a593Smuzhiyun 	default:
1872*4882a593Smuzhiyun 		kvm_err("Store not yet supported (inst=0x%08x)\n",
1873*4882a593Smuzhiyun 			inst.word);
1874*4882a593Smuzhiyun 		goto out_fail;
1875*4882a593Smuzhiyun 	}
1876*4882a593Smuzhiyun 
1877*4882a593Smuzhiyun 	vcpu->mmio_needed = 1;
1878*4882a593Smuzhiyun 	run->mmio.is_write = 1;
1879*4882a593Smuzhiyun 	vcpu->mmio_is_write = 1;
1880*4882a593Smuzhiyun 
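	/*
	 * If an in-kernel device on the MMIO bus claims the write, emulation
	 * completes here; otherwise the access is forwarded to userspace as
	 * an MMIO exit and finished on the next vcpu entry.
	 */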
1881*4882a593Smuzhiyun 	r = kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
1882*4882a593Smuzhiyun 			run->mmio.phys_addr, run->mmio.len, data);
1883*4882a593Smuzhiyun 
1884*4882a593Smuzhiyun 	if (!r) {
1885*4882a593Smuzhiyun 		vcpu->mmio_needed = 0;
1886*4882a593Smuzhiyun 		return EMULATE_DONE;
1887*4882a593Smuzhiyun 	}
1888*4882a593Smuzhiyun 
1889*4882a593Smuzhiyun 	return EMULATE_DO_MMIO;
1890*4882a593Smuzhiyun 
1891*4882a593Smuzhiyun out_fail:
1892*4882a593Smuzhiyun 	/* Rollback PC if emulation was unsuccessful */
1893*4882a593Smuzhiyun 	vcpu->arch.pc = curr_pc;
1894*4882a593Smuzhiyun 	return EMULATE_FAIL;
1895*4882a593Smuzhiyun }
1896*4882a593Smuzhiyun 
1897*4882a593Smuzhiyun enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
1898*4882a593Smuzhiyun 					    u32 cause, struct kvm_vcpu *vcpu)
1899*4882a593Smuzhiyun {
1900*4882a593Smuzhiyun 	struct kvm_run *run = vcpu->run;
1901*4882a593Smuzhiyun 	int r;
1902*4882a593Smuzhiyun 	enum emulation_result er;
1903*4882a593Smuzhiyun 	unsigned long curr_pc;
1904*4882a593Smuzhiyun 	u32 op, rt;
1905*4882a593Smuzhiyun 	unsigned int imme;
1906*4882a593Smuzhiyun 
1907*4882a593Smuzhiyun 	rt = inst.i_format.rt;
1908*4882a593Smuzhiyun 	op = inst.i_format.opcode;
1909*4882a593Smuzhiyun 
1910*4882a593Smuzhiyun 	/*
1911*4882a593Smuzhiyun 	 * Find the resume PC now while we have safe and easy access to the
1912*4882a593Smuzhiyun 	 * prior branch instruction, and save it for
1913*4882a593Smuzhiyun 	 * kvm_mips_complete_mmio_load() to restore later.
1914*4882a593Smuzhiyun 	 */
1915*4882a593Smuzhiyun 	curr_pc = vcpu->arch.pc;
1916*4882a593Smuzhiyun 	er = update_pc(vcpu, cause);
1917*4882a593Smuzhiyun 	if (er == EMULATE_FAIL)
1918*4882a593Smuzhiyun 		return er;
1919*4882a593Smuzhiyun 	vcpu->arch.io_pc = vcpu->arch.pc;
1920*4882a593Smuzhiyun 	vcpu->arch.pc = curr_pc;
1921*4882a593Smuzhiyun 
1922*4882a593Smuzhiyun 	vcpu->arch.io_gpr = rt;
1923*4882a593Smuzhiyun 
1924*4882a593Smuzhiyun 	run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1925*4882a593Smuzhiyun 						vcpu->arch.host_cp0_badvaddr);
1926*4882a593Smuzhiyun 	if (run->mmio.phys_addr == KVM_INVALID_ADDR)
1927*4882a593Smuzhiyun 		return EMULATE_FAIL;
1928*4882a593Smuzhiyun 
1929*4882a593Smuzhiyun 	vcpu->mmio_needed = 2;	/* signed */
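	/*
	 * mmio_needed doubles as the completion hint consumed by
	 * kvm_mips_complete_mmio_load(): 1 = zero-extend, 2 = sign-extend,
	 * larger values select the partial-word and Loongson cases below.
	 */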
1930*4882a593Smuzhiyun 	switch (op) {
1931*4882a593Smuzhiyun #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
1932*4882a593Smuzhiyun 	case ld_op:
1933*4882a593Smuzhiyun 		run->mmio.len = 8;
1934*4882a593Smuzhiyun 		break;
1935*4882a593Smuzhiyun 
1936*4882a593Smuzhiyun 	case lwu_op:
1937*4882a593Smuzhiyun 		vcpu->mmio_needed = 1;	/* unsigned */
1938*4882a593Smuzhiyun 		fallthrough;
1939*4882a593Smuzhiyun #endif
1940*4882a593Smuzhiyun 	case lw_op:
1941*4882a593Smuzhiyun 		run->mmio.len = 4;
1942*4882a593Smuzhiyun 		break;
1943*4882a593Smuzhiyun 
1944*4882a593Smuzhiyun 	case lhu_op:
1945*4882a593Smuzhiyun 		vcpu->mmio_needed = 1;	/* unsigned */
1946*4882a593Smuzhiyun 		fallthrough;
1947*4882a593Smuzhiyun 	case lh_op:
1948*4882a593Smuzhiyun 		run->mmio.len = 2;
1949*4882a593Smuzhiyun 		break;
1950*4882a593Smuzhiyun 
1951*4882a593Smuzhiyun 	case lbu_op:
1952*4882a593Smuzhiyun 		vcpu->mmio_needed = 1;	/* unsigned */
1953*4882a593Smuzhiyun 		fallthrough;
1954*4882a593Smuzhiyun 	case lb_op:
1955*4882a593Smuzhiyun 		run->mmio.len = 1;
1956*4882a593Smuzhiyun 		break;
1957*4882a593Smuzhiyun 
1958*4882a593Smuzhiyun 	case lwl_op:
1959*4882a593Smuzhiyun 		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1960*4882a593Smuzhiyun 					vcpu->arch.host_cp0_badvaddr) & (~0x3);
1961*4882a593Smuzhiyun 
1962*4882a593Smuzhiyun 		run->mmio.len = 4;
1963*4882a593Smuzhiyun 		imme = vcpu->arch.host_cp0_badvaddr & 0x3;
1964*4882a593Smuzhiyun 		switch (imme) {
1965*4882a593Smuzhiyun 		case 0:
1966*4882a593Smuzhiyun 			vcpu->mmio_needed = 3;	/* 1 byte */
1967*4882a593Smuzhiyun 			break;
1968*4882a593Smuzhiyun 		case 1:
1969*4882a593Smuzhiyun 			vcpu->mmio_needed = 4;	/* 2 bytes */
1970*4882a593Smuzhiyun 			break;
1971*4882a593Smuzhiyun 		case 2:
1972*4882a593Smuzhiyun 			vcpu->mmio_needed = 5;	/* 3 bytes */
1973*4882a593Smuzhiyun 			break;
1974*4882a593Smuzhiyun 		case 3:
1975*4882a593Smuzhiyun 			vcpu->mmio_needed = 6;	/* 4 bytes */
1976*4882a593Smuzhiyun 			break;
1977*4882a593Smuzhiyun 		default:
1978*4882a593Smuzhiyun 			break;
1979*4882a593Smuzhiyun 		}
1980*4882a593Smuzhiyun 		break;
1981*4882a593Smuzhiyun 
1982*4882a593Smuzhiyun 	case lwr_op:
1983*4882a593Smuzhiyun 		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1984*4882a593Smuzhiyun 					vcpu->arch.host_cp0_badvaddr) & (~0x3);
1985*4882a593Smuzhiyun 
1986*4882a593Smuzhiyun 		run->mmio.len = 4;
1987*4882a593Smuzhiyun 		imme = vcpu->arch.host_cp0_badvaddr & 0x3;
1988*4882a593Smuzhiyun 		switch (imme) {
1989*4882a593Smuzhiyun 		case 0:
1990*4882a593Smuzhiyun 			vcpu->mmio_needed = 7;	/* 4 bytes */
1991*4882a593Smuzhiyun 			break;
1992*4882a593Smuzhiyun 		case 1:
1993*4882a593Smuzhiyun 			vcpu->mmio_needed = 8;	/* 3 bytes */
1994*4882a593Smuzhiyun 			break;
1995*4882a593Smuzhiyun 		case 2:
1996*4882a593Smuzhiyun 			vcpu->mmio_needed = 9;	/* 2 bytes */
1997*4882a593Smuzhiyun 			break;
1998*4882a593Smuzhiyun 		case 3:
1999*4882a593Smuzhiyun 			vcpu->mmio_needed = 10;	/* 1 byte */
2000*4882a593Smuzhiyun 			break;
2001*4882a593Smuzhiyun 		default:
2002*4882a593Smuzhiyun 			break;
2003*4882a593Smuzhiyun 		}
2004*4882a593Smuzhiyun 		break;
2005*4882a593Smuzhiyun 
2006*4882a593Smuzhiyun #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
2007*4882a593Smuzhiyun 	case ldl_op:
2008*4882a593Smuzhiyun 		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
2009*4882a593Smuzhiyun 					vcpu->arch.host_cp0_badvaddr) & (~0x7);
2010*4882a593Smuzhiyun 
2011*4882a593Smuzhiyun 		run->mmio.len = 8;
2012*4882a593Smuzhiyun 		imme = vcpu->arch.host_cp0_badvaddr & 0x7;
2013*4882a593Smuzhiyun 		switch (imme) {
2014*4882a593Smuzhiyun 		case 0:
2015*4882a593Smuzhiyun 			vcpu->mmio_needed = 11;	/* 1 byte */
2016*4882a593Smuzhiyun 			break;
2017*4882a593Smuzhiyun 		case 1:
2018*4882a593Smuzhiyun 			vcpu->mmio_needed = 12;	/* 2 bytes */
2019*4882a593Smuzhiyun 			break;
2020*4882a593Smuzhiyun 		case 2:
2021*4882a593Smuzhiyun 			vcpu->mmio_needed = 13;	/* 3 bytes */
2022*4882a593Smuzhiyun 			break;
2023*4882a593Smuzhiyun 		case 3:
2024*4882a593Smuzhiyun 			vcpu->mmio_needed = 14;	/* 4 bytes */
2025*4882a593Smuzhiyun 			break;
2026*4882a593Smuzhiyun 		case 4:
2027*4882a593Smuzhiyun 			vcpu->mmio_needed = 15;	/* 5 bytes */
2028*4882a593Smuzhiyun 			break;
2029*4882a593Smuzhiyun 		case 5:
2030*4882a593Smuzhiyun 			vcpu->mmio_needed = 16;	/* 6 bytes */
2031*4882a593Smuzhiyun 			break;
2032*4882a593Smuzhiyun 		case 6:
2033*4882a593Smuzhiyun 			vcpu->mmio_needed = 17;	/* 7 bytes */
2034*4882a593Smuzhiyun 			break;
2035*4882a593Smuzhiyun 		case 7:
2036*4882a593Smuzhiyun 			vcpu->mmio_needed = 18;	/* 8 bytes */
2037*4882a593Smuzhiyun 			break;
2038*4882a593Smuzhiyun 		default:
2039*4882a593Smuzhiyun 			break;
2040*4882a593Smuzhiyun 		}
2041*4882a593Smuzhiyun 		break;
2042*4882a593Smuzhiyun 
2043*4882a593Smuzhiyun 	case ldr_op:
2044*4882a593Smuzhiyun 		run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
2045*4882a593Smuzhiyun 					vcpu->arch.host_cp0_badvaddr) & (~0x7);
2046*4882a593Smuzhiyun 
2047*4882a593Smuzhiyun 		run->mmio.len = 8;
2048*4882a593Smuzhiyun 		imme = vcpu->arch.host_cp0_badvaddr & 0x7;
2049*4882a593Smuzhiyun 		switch (imme) {
2050*4882a593Smuzhiyun 		case 0:
2051*4882a593Smuzhiyun 			vcpu->mmio_needed = 19;	/* 8 bytes */
2052*4882a593Smuzhiyun 			break;
2053*4882a593Smuzhiyun 		case 1:
2054*4882a593Smuzhiyun 			vcpu->mmio_needed = 20;	/* 7 bytes */
2055*4882a593Smuzhiyun 			break;
2056*4882a593Smuzhiyun 		case 2:
2057*4882a593Smuzhiyun 			vcpu->mmio_needed = 21;	/* 6 bytes */
2058*4882a593Smuzhiyun 			break;
2059*4882a593Smuzhiyun 		case 3:
2060*4882a593Smuzhiyun 			vcpu->mmio_needed = 22;	/* 5 bytes */
2061*4882a593Smuzhiyun 			break;
2062*4882a593Smuzhiyun 		case 4:
2063*4882a593Smuzhiyun 			vcpu->mmio_needed = 23;	/* 4 bytes */
2064*4882a593Smuzhiyun 			break;
2065*4882a593Smuzhiyun 		case 5:
2066*4882a593Smuzhiyun 			vcpu->mmio_needed = 24;	/* 3 bytes */
2067*4882a593Smuzhiyun 			break;
2068*4882a593Smuzhiyun 		case 6:
2069*4882a593Smuzhiyun 			vcpu->mmio_needed = 25;	/* 2 bytes */
2070*4882a593Smuzhiyun 			break;
2071*4882a593Smuzhiyun 		case 7:
2072*4882a593Smuzhiyun 			vcpu->mmio_needed = 26;	/* 1 byte */
2073*4882a593Smuzhiyun 			break;
2074*4882a593Smuzhiyun 		default:
2075*4882a593Smuzhiyun 			break;
2076*4882a593Smuzhiyun 		}
2077*4882a593Smuzhiyun 		break;
2078*4882a593Smuzhiyun #endif
2079*4882a593Smuzhiyun 
2080*4882a593Smuzhiyun #ifdef CONFIG_CPU_LOONGSON64
2081*4882a593Smuzhiyun 	case ldc2_op:
2082*4882a593Smuzhiyun 		rt = inst.loongson3_lsdc2_format.rt;
2083*4882a593Smuzhiyun 		switch (inst.loongson3_lsdc2_format.opcode1) {
2084*4882a593Smuzhiyun 		/*
2085*4882a593Smuzhiyun 		 * Loongson-3 overridden ldc2 instructions.
2086*4882a593Smuzhiyun 		 * opcode1              instruction
2087*4882a593Smuzhiyun 		 *   0x0          gslbx: load 1 byte to GPR
2088*4882a593Smuzhiyun 		 *   0x1          gslhx: load 2 bytes to GPR
2089*4882a593Smuzhiyun 		 *   0x2          gslwx: load 4 bytes to GPR
2090*4882a593Smuzhiyun 		 *   0x3          gsldx: load 8 bytes to GPR
2091*4882a593Smuzhiyun 		 */
2092*4882a593Smuzhiyun 		case 0x0:
2093*4882a593Smuzhiyun 			run->mmio.len = 1;
2094*4882a593Smuzhiyun 			vcpu->mmio_needed = 27;	/* signed */
2095*4882a593Smuzhiyun 			break;
2096*4882a593Smuzhiyun 		case 0x1:
2097*4882a593Smuzhiyun 			run->mmio.len = 2;
2098*4882a593Smuzhiyun 			vcpu->mmio_needed = 28;	/* signed */
2099*4882a593Smuzhiyun 			break;
2100*4882a593Smuzhiyun 		case 0x2:
2101*4882a593Smuzhiyun 			run->mmio.len = 4;
2102*4882a593Smuzhiyun 			vcpu->mmio_needed = 29;	/* signed */
2103*4882a593Smuzhiyun 			break;
2104*4882a593Smuzhiyun 		case 0x3:
2105*4882a593Smuzhiyun 			run->mmio.len = 8;
2106*4882a593Smuzhiyun 			vcpu->mmio_needed = 30;	/* signed */
2107*4882a593Smuzhiyun 			break;
2108*4882a593Smuzhiyun 		default:
2109*4882a593Smuzhiyun 			kvm_err("Godson Extended GS-Load for float not yet supported (inst=0x%08x)\n",
2110*4882a593Smuzhiyun 				inst.word);
2111*4882a593Smuzhiyun 			break;
2112*4882a593Smuzhiyun 		}
2113*4882a593Smuzhiyun 		break;
2114*4882a593Smuzhiyun #endif
2115*4882a593Smuzhiyun 
2116*4882a593Smuzhiyun 	default:
2117*4882a593Smuzhiyun 		kvm_err("Load not yet supported (inst=0x%08x)\n",
2118*4882a593Smuzhiyun 			inst.word);
2119*4882a593Smuzhiyun 		vcpu->mmio_needed = 0;
2120*4882a593Smuzhiyun 		return EMULATE_FAIL;
2121*4882a593Smuzhiyun 	}
2122*4882a593Smuzhiyun 
2123*4882a593Smuzhiyun 	run->mmio.is_write = 0;
2124*4882a593Smuzhiyun 	vcpu->mmio_is_write = 0;
2125*4882a593Smuzhiyun 
2126*4882a593Smuzhiyun 	r = kvm_io_bus_read(vcpu, KVM_MMIO_BUS,
2127*4882a593Smuzhiyun 			run->mmio.phys_addr, run->mmio.len, run->mmio.data);
2128*4882a593Smuzhiyun 
2129*4882a593Smuzhiyun 	if (!r) {
2130*4882a593Smuzhiyun 		kvm_mips_complete_mmio_load(vcpu);
2131*4882a593Smuzhiyun 		vcpu->mmio_needed = 0;
2132*4882a593Smuzhiyun 		return EMULATE_DONE;
2133*4882a593Smuzhiyun 	}
2134*4882a593Smuzhiyun 
2135*4882a593Smuzhiyun 	return EMULATE_DO_MMIO;
2136*4882a593Smuzhiyun }
2137*4882a593Smuzhiyun 
2138*4882a593Smuzhiyun #ifndef CONFIG_KVM_MIPS_VZ
2139*4882a593Smuzhiyun static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long),
2140*4882a593Smuzhiyun 						     unsigned long curr_pc,
2141*4882a593Smuzhiyun 						     unsigned long addr,
2142*4882a593Smuzhiyun 						     struct kvm_vcpu *vcpu,
2143*4882a593Smuzhiyun 						     u32 cause)
2144*4882a593Smuzhiyun {
2145*4882a593Smuzhiyun 	int err;
2146*4882a593Smuzhiyun 
2147*4882a593Smuzhiyun 	for (;;) {
2148*4882a593Smuzhiyun 		/* Carefully attempt the cache operation */
2149*4882a593Smuzhiyun 		kvm_trap_emul_gva_lockless_begin(vcpu);
2150*4882a593Smuzhiyun 		err = fn(addr);
2151*4882a593Smuzhiyun 		kvm_trap_emul_gva_lockless_end(vcpu);
2152*4882a593Smuzhiyun 
2153*4882a593Smuzhiyun 		if (likely(!err))
2154*4882a593Smuzhiyun 			return EMULATE_DONE;
2155*4882a593Smuzhiyun 
2156*4882a593Smuzhiyun 		/*
2157*4882a593Smuzhiyun 		 * Try to handle the fault and retry, maybe we just raced with a
2158*4882a593Smuzhiyun 		 * GVA invalidation.
2159*4882a593Smuzhiyun 		 */
2160*4882a593Smuzhiyun 		switch (kvm_trap_emul_gva_fault(vcpu, addr, false)) {
2161*4882a593Smuzhiyun 		case KVM_MIPS_GVA:
2162*4882a593Smuzhiyun 		case KVM_MIPS_GPA:
2163*4882a593Smuzhiyun 			/* bad virtual or physical address */
2164*4882a593Smuzhiyun 			return EMULATE_FAIL;
2165*4882a593Smuzhiyun 		case KVM_MIPS_TLB:
2166*4882a593Smuzhiyun 			/* no matching guest TLB */
2167*4882a593Smuzhiyun 			vcpu->arch.host_cp0_badvaddr = addr;
2168*4882a593Smuzhiyun 			vcpu->arch.pc = curr_pc;
2169*4882a593Smuzhiyun 			kvm_mips_emulate_tlbmiss_ld(cause, NULL, vcpu);
2170*4882a593Smuzhiyun 			return EMULATE_EXCEPT;
2171*4882a593Smuzhiyun 		case KVM_MIPS_TLBINV:
2172*4882a593Smuzhiyun 			/* invalid matching guest TLB */
2173*4882a593Smuzhiyun 			vcpu->arch.host_cp0_badvaddr = addr;
2174*4882a593Smuzhiyun 			vcpu->arch.pc = curr_pc;
2175*4882a593Smuzhiyun 			kvm_mips_emulate_tlbinv_ld(cause, NULL, vcpu);
2176*4882a593Smuzhiyun 			return EMULATE_EXCEPT;
2177*4882a593Smuzhiyun 		default:
2178*4882a593Smuzhiyun 			break;
2179*4882a593Smuzhiyun 		}
2180*4882a593Smuzhiyun 	}
2181*4882a593Smuzhiyun }
2182*4882a593Smuzhiyun 
2183*4882a593Smuzhiyun enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
2184*4882a593Smuzhiyun 					     u32 *opc, u32 cause,
2185*4882a593Smuzhiyun 					     struct kvm_vcpu *vcpu)
2186*4882a593Smuzhiyun {
2187*4882a593Smuzhiyun 	enum emulation_result er = EMULATE_DONE;
2188*4882a593Smuzhiyun 	u32 cache, op_inst, op, base;
2189*4882a593Smuzhiyun 	s16 offset;
2190*4882a593Smuzhiyun 	struct kvm_vcpu_arch *arch = &vcpu->arch;
2191*4882a593Smuzhiyun 	unsigned long va;
2192*4882a593Smuzhiyun 	unsigned long curr_pc;
2193*4882a593Smuzhiyun 
2194*4882a593Smuzhiyun 	/*
2195*4882a593Smuzhiyun 	 * Update PC and hold onto current PC in case there is
2196*4882a593Smuzhiyun 	 * an error and we want to rollback the PC
2197*4882a593Smuzhiyun 	 */
2198*4882a593Smuzhiyun 	curr_pc = vcpu->arch.pc;
2199*4882a593Smuzhiyun 	er = update_pc(vcpu, cause);
2200*4882a593Smuzhiyun 	if (er == EMULATE_FAIL)
2201*4882a593Smuzhiyun 		return er;
2202*4882a593Smuzhiyun 
2203*4882a593Smuzhiyun 	base = inst.i_format.rs;
2204*4882a593Smuzhiyun 	op_inst = inst.i_format.rt;
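	/*
	 * MIPS r6 moved CACHE into the SPEC3 encoding with a 9-bit signed
	 * offset; earlier revisions use the 16-bit i-format immediate.
	 */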
2205*4882a593Smuzhiyun 	if (cpu_has_mips_r6)
2206*4882a593Smuzhiyun 		offset = inst.spec3_format.simmediate;
2207*4882a593Smuzhiyun 	else
2208*4882a593Smuzhiyun 		offset = inst.i_format.simmediate;
2209*4882a593Smuzhiyun 	cache = op_inst & CacheOp_Cache;
2210*4882a593Smuzhiyun 	op = op_inst & CacheOp_Op;
2211*4882a593Smuzhiyun 
2212*4882a593Smuzhiyun 	va = arch->gprs[base] + offset;
2213*4882a593Smuzhiyun 
2214*4882a593Smuzhiyun 	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
2215*4882a593Smuzhiyun 		  cache, op, base, arch->gprs[base], offset);
2216*4882a593Smuzhiyun 
2217*4882a593Smuzhiyun 	/*
2218*4882a593Smuzhiyun 	 * Treat INDEX_INV as a nop, basically issued by Linux on startup to
2219*4882a593Smuzhiyun 	 * invalidate the caches entirely by stepping through all the
2220*4882a593Smuzhiyun 	 * ways/indexes
2221*4882a593Smuzhiyun 	 */
2222*4882a593Smuzhiyun 	if (op == Index_Writeback_Inv) {
2223*4882a593Smuzhiyun 		kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
2224*4882a593Smuzhiyun 			  vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
2225*4882a593Smuzhiyun 			  arch->gprs[base], offset);
2226*4882a593Smuzhiyun 
2227*4882a593Smuzhiyun 		if (cache == Cache_D) {
2228*4882a593Smuzhiyun #ifdef CONFIG_CPU_R4K_CACHE_TLB
2229*4882a593Smuzhiyun 			r4k_blast_dcache();
2230*4882a593Smuzhiyun #else
2231*4882a593Smuzhiyun 			switch (boot_cpu_type()) {
2232*4882a593Smuzhiyun 			case CPU_CAVIUM_OCTEON3:
2233*4882a593Smuzhiyun 				/* locally flush icache */
2234*4882a593Smuzhiyun 				local_flush_icache_range(0, 0);
2235*4882a593Smuzhiyun 				break;
2236*4882a593Smuzhiyun 			default:
2237*4882a593Smuzhiyun 				__flush_cache_all();
2238*4882a593Smuzhiyun 				break;
2239*4882a593Smuzhiyun 			}
2240*4882a593Smuzhiyun #endif
2241*4882a593Smuzhiyun 		} else if (cache == Cache_I) {
2242*4882a593Smuzhiyun #ifdef CONFIG_CPU_R4K_CACHE_TLB
2243*4882a593Smuzhiyun 			r4k_blast_icache();
2244*4882a593Smuzhiyun #else
2245*4882a593Smuzhiyun 			switch (boot_cpu_type()) {
2246*4882a593Smuzhiyun 			case CPU_CAVIUM_OCTEON3:
2247*4882a593Smuzhiyun 				/* locally flush icache */
2248*4882a593Smuzhiyun 				local_flush_icache_range(0, 0);
2249*4882a593Smuzhiyun 				break;
2250*4882a593Smuzhiyun 			default:
2251*4882a593Smuzhiyun 				flush_icache_all();
2252*4882a593Smuzhiyun 				break;
2253*4882a593Smuzhiyun 			}
2254*4882a593Smuzhiyun #endif
2255*4882a593Smuzhiyun 		} else {
2256*4882a593Smuzhiyun 			kvm_err("%s: unsupported CACHE INDEX operation\n",
2257*4882a593Smuzhiyun 				__func__);
2258*4882a593Smuzhiyun 			return EMULATE_FAIL;
2259*4882a593Smuzhiyun 		}
2260*4882a593Smuzhiyun 
2261*4882a593Smuzhiyun #ifdef CONFIG_KVM_MIPS_DYN_TRANS
2262*4882a593Smuzhiyun 		kvm_mips_trans_cache_index(inst, opc, vcpu);
2263*4882a593Smuzhiyun #endif
2264*4882a593Smuzhiyun 		goto done;
2265*4882a593Smuzhiyun 	}
2266*4882a593Smuzhiyun 
2267*4882a593Smuzhiyun 	/* XXXKYMA: Only the subset of cache ops used by Linux is supported */
2268*4882a593Smuzhiyun 	if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
2269*4882a593Smuzhiyun 		/*
2270*4882a593Smuzhiyun 		 * Perform the dcache part of icache synchronisation on the
2271*4882a593Smuzhiyun 		 * guest's behalf.
2272*4882a593Smuzhiyun 		 */
2273*4882a593Smuzhiyun 		er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
2274*4882a593Smuzhiyun 					     curr_pc, va, vcpu, cause);
2275*4882a593Smuzhiyun 		if (er != EMULATE_DONE)
2276*4882a593Smuzhiyun 			goto done;
2277*4882a593Smuzhiyun #ifdef CONFIG_KVM_MIPS_DYN_TRANS
2278*4882a593Smuzhiyun 		/*
2279*4882a593Smuzhiyun 		 * Replace the CACHE instruction with a SYNCI; not identical,
2280*4882a593Smuzhiyun 		 * but it avoids a trap
2281*4882a593Smuzhiyun 		 */
2282*4882a593Smuzhiyun 		kvm_mips_trans_cache_va(inst, opc, vcpu);
2283*4882a593Smuzhiyun #endif
2284*4882a593Smuzhiyun 	} else if (op_inst == Hit_Invalidate_I) {
2285*4882a593Smuzhiyun 		/* Perform the icache synchronisation on the guest's behalf */
2286*4882a593Smuzhiyun 		er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
2287*4882a593Smuzhiyun 					     curr_pc, va, vcpu, cause);
2288*4882a593Smuzhiyun 		if (er != EMULATE_DONE)
2289*4882a593Smuzhiyun 			goto done;
2290*4882a593Smuzhiyun 		er = kvm_mips_guest_cache_op(protected_flush_icache_line,
2291*4882a593Smuzhiyun 					     curr_pc, va, vcpu, cause);
2292*4882a593Smuzhiyun 		if (er != EMULATE_DONE)
2293*4882a593Smuzhiyun 			goto done;
2294*4882a593Smuzhiyun 
2295*4882a593Smuzhiyun #ifdef CONFIG_KVM_MIPS_DYN_TRANS
2296*4882a593Smuzhiyun 		/* Replace the CACHE instruction with a SYNCI */
2297*4882a593Smuzhiyun 		kvm_mips_trans_cache_va(inst, opc, vcpu);
2298*4882a593Smuzhiyun #endif
2299*4882a593Smuzhiyun 	} else {
2300*4882a593Smuzhiyun 		kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
2301*4882a593Smuzhiyun 			cache, op, base, arch->gprs[base], offset);
2302*4882a593Smuzhiyun 		er = EMULATE_FAIL;
2303*4882a593Smuzhiyun 	}
2304*4882a593Smuzhiyun 
2305*4882a593Smuzhiyun done:
2306*4882a593Smuzhiyun 	/* Roll back the PC only if emulation was unsuccessful */
2307*4882a593Smuzhiyun 	if (er == EMULATE_FAIL)
2308*4882a593Smuzhiyun 		vcpu->arch.pc = curr_pc;
2309*4882a593Smuzhiyun 	/* Guest exception needs guest to resume */
2310*4882a593Smuzhiyun 	if (er == EMULATE_EXCEPT)
2311*4882a593Smuzhiyun 		er = EMULATE_DONE;
2312*4882a593Smuzhiyun 
2313*4882a593Smuzhiyun 	return er;
2314*4882a593Smuzhiyun }
2315*4882a593Smuzhiyun 
2316*4882a593Smuzhiyun enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
2317*4882a593Smuzhiyun 					    struct kvm_vcpu *vcpu)
2318*4882a593Smuzhiyun {
2319*4882a593Smuzhiyun 	union mips_instruction inst;
2320*4882a593Smuzhiyun 	enum emulation_result er = EMULATE_DONE;
2321*4882a593Smuzhiyun 	int err;
2322*4882a593Smuzhiyun 
2323*4882a593Smuzhiyun 	/* Fetch the instruction. */
2324*4882a593Smuzhiyun 	if (cause & CAUSEF_BD)
2325*4882a593Smuzhiyun 		opc += 1;
2326*4882a593Smuzhiyun 	err = kvm_get_badinstr(opc, vcpu, &inst.word);
2327*4882a593Smuzhiyun 	if (err)
2328*4882a593Smuzhiyun 		return EMULATE_FAIL;
2329*4882a593Smuzhiyun 
2330*4882a593Smuzhiyun 	switch (inst.r_format.opcode) {
2331*4882a593Smuzhiyun 	case cop0_op:
2332*4882a593Smuzhiyun 		er = kvm_mips_emulate_CP0(inst, opc, cause, vcpu);
2333*4882a593Smuzhiyun 		break;
2334*4882a593Smuzhiyun 
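	/*
	 * Pre-r6 cores have a dedicated CACHE major opcode, while MIPS r6
	 * re-encodes CACHE under the SPECIAL3 opcode (cache6_op), so both
	 * variants below are funnelled into kvm_mips_emulate_cache().
	 */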
2335*4882a593Smuzhiyun #ifndef CONFIG_CPU_MIPSR6
2336*4882a593Smuzhiyun 	case cache_op:
2337*4882a593Smuzhiyun 		++vcpu->stat.cache_exits;
2338*4882a593Smuzhiyun 		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
2339*4882a593Smuzhiyun 		er = kvm_mips_emulate_cache(inst, opc, cause, vcpu);
2340*4882a593Smuzhiyun 		break;
2341*4882a593Smuzhiyun #else
2342*4882a593Smuzhiyun 	case spec3_op:
2343*4882a593Smuzhiyun 		switch (inst.spec3_format.func) {
2344*4882a593Smuzhiyun 		case cache6_op:
2345*4882a593Smuzhiyun 			++vcpu->stat.cache_exits;
2346*4882a593Smuzhiyun 			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
2347*4882a593Smuzhiyun 			er = kvm_mips_emulate_cache(inst, opc, cause,
2348*4882a593Smuzhiyun 						    vcpu);
2349*4882a593Smuzhiyun 			break;
2350*4882a593Smuzhiyun 		default:
2351*4882a593Smuzhiyun 			goto unknown;
2352*4882a593Smuzhiyun 		}
2353*4882a593Smuzhiyun 		break;
2354*4882a593Smuzhiyun unknown:
2355*4882a593Smuzhiyun #endif
2356*4882a593Smuzhiyun 
2357*4882a593Smuzhiyun 	default:
2358*4882a593Smuzhiyun 		kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
2359*4882a593Smuzhiyun 			inst.word);
2360*4882a593Smuzhiyun 		kvm_arch_vcpu_dump_regs(vcpu);
2361*4882a593Smuzhiyun 		er = EMULATE_FAIL;
2362*4882a593Smuzhiyun 		break;
2363*4882a593Smuzhiyun 	}
2364*4882a593Smuzhiyun 
2365*4882a593Smuzhiyun 	return er;
2366*4882a593Smuzhiyun }
2367*4882a593Smuzhiyun #endif /* CONFIG_KVM_MIPS_VZ */
2368*4882a593Smuzhiyun 
2369*4882a593Smuzhiyun /**
2370*4882a593Smuzhiyun  * kvm_mips_guest_exception_base() - Find guest exception vector base address.
2371*4882a593Smuzhiyun  *
2372*4882a593Smuzhiyun  * Returns:	The base address of the current guest exception vector, taking
2373*4882a593Smuzhiyun  *		both Guest.CP0_Status.BEV and Guest.CP0_EBase into account.
2374*4882a593Smuzhiyun  */
2375*4882a593Smuzhiyun long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu)
2376*4882a593Smuzhiyun {
2377*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
2378*4882a593Smuzhiyun 
2379*4882a593Smuzhiyun 	if (kvm_read_c0_guest_status(cop0) & ST0_BEV)
2380*4882a593Smuzhiyun 		return KVM_GUEST_CKSEG1ADDR(0x1fc00200);
2381*4882a593Smuzhiyun 	else
2382*4882a593Smuzhiyun 		return kvm_read_c0_guest_ebase(cop0) & MIPS_EBASE_BASE;
2383*4882a593Smuzhiyun }
2384*4882a593Smuzhiyun 
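/*
 * The exception injectors below share a common delivery pattern: while
 * Guest.CP0_Status.EXL is clear they save the guest PC in Guest.CP0_EPC, set
 * EXL, mirror the branch-delay state into Guest.CP0_Cause.BD, set the ExcCode
 * field, and point the guest PC at its exception vector
 * (kvm_mips_guest_exception_base() plus the appropriate offset).
 */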
2385*4882a593Smuzhiyun enum emulation_result kvm_mips_emulate_syscall(u32 cause,
2386*4882a593Smuzhiyun 					       u32 *opc,
2387*4882a593Smuzhiyun 					       struct kvm_vcpu *vcpu)
2388*4882a593Smuzhiyun {
2389*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
2390*4882a593Smuzhiyun 	struct kvm_vcpu_arch *arch = &vcpu->arch;
2391*4882a593Smuzhiyun 	enum emulation_result er = EMULATE_DONE;
2392*4882a593Smuzhiyun 
2393*4882a593Smuzhiyun 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2394*4882a593Smuzhiyun 		/* save old pc */
2395*4882a593Smuzhiyun 		kvm_write_c0_guest_epc(cop0, arch->pc);
2396*4882a593Smuzhiyun 		kvm_set_c0_guest_status(cop0, ST0_EXL);
2397*4882a593Smuzhiyun 
2398*4882a593Smuzhiyun 		if (cause & CAUSEF_BD)
2399*4882a593Smuzhiyun 			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2400*4882a593Smuzhiyun 		else
2401*4882a593Smuzhiyun 			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2402*4882a593Smuzhiyun 
2403*4882a593Smuzhiyun 		kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
2404*4882a593Smuzhiyun 
2405*4882a593Smuzhiyun 		kvm_change_c0_guest_cause(cop0, (0xff),
2406*4882a593Smuzhiyun 					  (EXCCODE_SYS << CAUSEB_EXCCODE));
2407*4882a593Smuzhiyun 
2408*4882a593Smuzhiyun 		/* Set PC to the exception entry point */
2409*4882a593Smuzhiyun 		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2410*4882a593Smuzhiyun 
2411*4882a593Smuzhiyun 	} else {
2412*4882a593Smuzhiyun 		kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
2413*4882a593Smuzhiyun 		er = EMULATE_FAIL;
2414*4882a593Smuzhiyun 	}
2415*4882a593Smuzhiyun 
2416*4882a593Smuzhiyun 	return er;
2417*4882a593Smuzhiyun }
2418*4882a593Smuzhiyun 
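/*
 * As architected on MIPS, TLB refill (miss) exceptions are delivered to the
 * refill vector at offset 0x0 only while Guest.CP0_Status.EXL is clear; with
 * EXL already set, and for TLB invalid exceptions, the general vector at
 * offset 0x180 is used instead.
 */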
2419*4882a593Smuzhiyun enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
2420*4882a593Smuzhiyun 						  u32 *opc,
2421*4882a593Smuzhiyun 						  struct kvm_vcpu *vcpu)
2422*4882a593Smuzhiyun {
2423*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
2424*4882a593Smuzhiyun 	struct kvm_vcpu_arch *arch = &vcpu->arch;
2425*4882a593Smuzhiyun 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2426*4882a593Smuzhiyun 			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2427*4882a593Smuzhiyun 
2428*4882a593Smuzhiyun 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2429*4882a593Smuzhiyun 		/* save old pc */
2430*4882a593Smuzhiyun 		kvm_write_c0_guest_epc(cop0, arch->pc);
2431*4882a593Smuzhiyun 		kvm_set_c0_guest_status(cop0, ST0_EXL);
2432*4882a593Smuzhiyun 
2433*4882a593Smuzhiyun 		if (cause & CAUSEF_BD)
2434*4882a593Smuzhiyun 			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2435*4882a593Smuzhiyun 		else
2436*4882a593Smuzhiyun 			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2437*4882a593Smuzhiyun 
2438*4882a593Smuzhiyun 		kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
2439*4882a593Smuzhiyun 			  arch->pc);
2440*4882a593Smuzhiyun 
2441*4882a593Smuzhiyun 		/* set pc to the exception entry point */
2442*4882a593Smuzhiyun 		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0;
2443*4882a593Smuzhiyun 
2444*4882a593Smuzhiyun 	} else {
2445*4882a593Smuzhiyun 		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
2446*4882a593Smuzhiyun 			  arch->pc);
2447*4882a593Smuzhiyun 
2448*4882a593Smuzhiyun 		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2449*4882a593Smuzhiyun 	}
2450*4882a593Smuzhiyun 
2451*4882a593Smuzhiyun 	kvm_change_c0_guest_cause(cop0, (0xff),
2452*4882a593Smuzhiyun 				  (EXCCODE_TLBL << CAUSEB_EXCCODE));
2453*4882a593Smuzhiyun 
2454*4882a593Smuzhiyun 	/* setup badvaddr, context and entryhi registers for the guest */
2455*4882a593Smuzhiyun 	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2456*4882a593Smuzhiyun 	/* XXXKYMA: is the context register used by linux??? */
2457*4882a593Smuzhiyun 	kvm_write_c0_guest_entryhi(cop0, entryhi);
2458*4882a593Smuzhiyun 
2459*4882a593Smuzhiyun 	return EMULATE_DONE;
2460*4882a593Smuzhiyun }
2461*4882a593Smuzhiyun 
2462*4882a593Smuzhiyun enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
2463*4882a593Smuzhiyun 						 u32 *opc,
2464*4882a593Smuzhiyun 						 struct kvm_vcpu *vcpu)
2465*4882a593Smuzhiyun {
2466*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
2467*4882a593Smuzhiyun 	struct kvm_vcpu_arch *arch = &vcpu->arch;
2468*4882a593Smuzhiyun 	unsigned long entryhi =
2469*4882a593Smuzhiyun 		(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2470*4882a593Smuzhiyun 		(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2471*4882a593Smuzhiyun 
2472*4882a593Smuzhiyun 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2473*4882a593Smuzhiyun 		/* save old pc */
2474*4882a593Smuzhiyun 		kvm_write_c0_guest_epc(cop0, arch->pc);
2475*4882a593Smuzhiyun 		kvm_set_c0_guest_status(cop0, ST0_EXL);
2476*4882a593Smuzhiyun 
2477*4882a593Smuzhiyun 		if (cause & CAUSEF_BD)
2478*4882a593Smuzhiyun 			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2479*4882a593Smuzhiyun 		else
2480*4882a593Smuzhiyun 			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2481*4882a593Smuzhiyun 
2482*4882a593Smuzhiyun 		kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
2483*4882a593Smuzhiyun 			  arch->pc);
2484*4882a593Smuzhiyun 	} else {
2485*4882a593Smuzhiyun 		kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
2486*4882a593Smuzhiyun 			  arch->pc);
2487*4882a593Smuzhiyun 	}
2488*4882a593Smuzhiyun 
2489*4882a593Smuzhiyun 	/* set pc to the exception entry point */
2490*4882a593Smuzhiyun 	arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2491*4882a593Smuzhiyun 
2492*4882a593Smuzhiyun 	kvm_change_c0_guest_cause(cop0, (0xff),
2493*4882a593Smuzhiyun 				  (EXCCODE_TLBL << CAUSEB_EXCCODE));
2494*4882a593Smuzhiyun 
2495*4882a593Smuzhiyun 	/* setup badvaddr, context and entryhi registers for the guest */
2496*4882a593Smuzhiyun 	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2497*4882a593Smuzhiyun 	/* XXXKYMA: is the context register used by linux??? */
2498*4882a593Smuzhiyun 	kvm_write_c0_guest_entryhi(cop0, entryhi);
2499*4882a593Smuzhiyun 
2500*4882a593Smuzhiyun 	return EMULATE_DONE;
2501*4882a593Smuzhiyun }
2502*4882a593Smuzhiyun 
2503*4882a593Smuzhiyun enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
2504*4882a593Smuzhiyun 						  u32 *opc,
2505*4882a593Smuzhiyun 						  struct kvm_vcpu *vcpu)
2506*4882a593Smuzhiyun {
2507*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
2508*4882a593Smuzhiyun 	struct kvm_vcpu_arch *arch = &vcpu->arch;
2509*4882a593Smuzhiyun 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2510*4882a593Smuzhiyun 			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2511*4882a593Smuzhiyun 
2512*4882a593Smuzhiyun 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2513*4882a593Smuzhiyun 		/* save old pc */
2514*4882a593Smuzhiyun 		kvm_write_c0_guest_epc(cop0, arch->pc);
2515*4882a593Smuzhiyun 		kvm_set_c0_guest_status(cop0, ST0_EXL);
2516*4882a593Smuzhiyun 
2517*4882a593Smuzhiyun 		if (cause & CAUSEF_BD)
2518*4882a593Smuzhiyun 			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2519*4882a593Smuzhiyun 		else
2520*4882a593Smuzhiyun 			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2521*4882a593Smuzhiyun 
2522*4882a593Smuzhiyun 		kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
2523*4882a593Smuzhiyun 			  arch->pc);
2524*4882a593Smuzhiyun 
2525*4882a593Smuzhiyun 		/* Set PC to the exception entry point */
2526*4882a593Smuzhiyun 		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0;
2527*4882a593Smuzhiyun 	} else {
2528*4882a593Smuzhiyun 		kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
2529*4882a593Smuzhiyun 			  arch->pc);
2530*4882a593Smuzhiyun 		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2531*4882a593Smuzhiyun 	}
2532*4882a593Smuzhiyun 
2533*4882a593Smuzhiyun 	kvm_change_c0_guest_cause(cop0, (0xff),
2534*4882a593Smuzhiyun 				  (EXCCODE_TLBS << CAUSEB_EXCCODE));
2535*4882a593Smuzhiyun 
2536*4882a593Smuzhiyun 	/* setup badvaddr, context and entryhi registers for the guest */
2537*4882a593Smuzhiyun 	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2538*4882a593Smuzhiyun 	/* XXXKYMA: is the context register used by linux??? */
2539*4882a593Smuzhiyun 	kvm_write_c0_guest_entryhi(cop0, entryhi);
2540*4882a593Smuzhiyun 
2541*4882a593Smuzhiyun 	return EMULATE_DONE;
2542*4882a593Smuzhiyun }
2543*4882a593Smuzhiyun 
2544*4882a593Smuzhiyun enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
2545*4882a593Smuzhiyun 						 u32 *opc,
2546*4882a593Smuzhiyun 						 struct kvm_vcpu *vcpu)
2547*4882a593Smuzhiyun {
2548*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
2549*4882a593Smuzhiyun 	struct kvm_vcpu_arch *arch = &vcpu->arch;
2550*4882a593Smuzhiyun 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2551*4882a593Smuzhiyun 		(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2552*4882a593Smuzhiyun 
2553*4882a593Smuzhiyun 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2554*4882a593Smuzhiyun 		/* save old pc */
2555*4882a593Smuzhiyun 		kvm_write_c0_guest_epc(cop0, arch->pc);
2556*4882a593Smuzhiyun 		kvm_set_c0_guest_status(cop0, ST0_EXL);
2557*4882a593Smuzhiyun 
2558*4882a593Smuzhiyun 		if (cause & CAUSEF_BD)
2559*4882a593Smuzhiyun 			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2560*4882a593Smuzhiyun 		else
2561*4882a593Smuzhiyun 			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2562*4882a593Smuzhiyun 
2563*4882a593Smuzhiyun 		kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
2564*4882a593Smuzhiyun 			  arch->pc);
2565*4882a593Smuzhiyun 	} else {
2566*4882a593Smuzhiyun 		kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
2567*4882a593Smuzhiyun 			  arch->pc);
2568*4882a593Smuzhiyun 	}
2569*4882a593Smuzhiyun 
2570*4882a593Smuzhiyun 	/* Set PC to the exception entry point */
2571*4882a593Smuzhiyun 	arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2572*4882a593Smuzhiyun 
2573*4882a593Smuzhiyun 	kvm_change_c0_guest_cause(cop0, (0xff),
2574*4882a593Smuzhiyun 				  (EXCCODE_TLBS << CAUSEB_EXCCODE));
2575*4882a593Smuzhiyun 
2576*4882a593Smuzhiyun 	/* setup badvaddr, context and entryhi registers for the guest */
2577*4882a593Smuzhiyun 	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2578*4882a593Smuzhiyun 	/* XXXKYMA: is the context register used by linux??? */
2579*4882a593Smuzhiyun 	kvm_write_c0_guest_entryhi(cop0, entryhi);
2580*4882a593Smuzhiyun 
2581*4882a593Smuzhiyun 	return EMULATE_DONE;
2582*4882a593Smuzhiyun }
2583*4882a593Smuzhiyun 
2584*4882a593Smuzhiyun enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
2585*4882a593Smuzhiyun 					      u32 *opc,
2586*4882a593Smuzhiyun 					      struct kvm_vcpu *vcpu)
2587*4882a593Smuzhiyun {
2588*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
2589*4882a593Smuzhiyun 	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2590*4882a593Smuzhiyun 			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2591*4882a593Smuzhiyun 	struct kvm_vcpu_arch *arch = &vcpu->arch;
2592*4882a593Smuzhiyun 
2593*4882a593Smuzhiyun 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2594*4882a593Smuzhiyun 		/* save old pc */
2595*4882a593Smuzhiyun 		kvm_write_c0_guest_epc(cop0, arch->pc);
2596*4882a593Smuzhiyun 		kvm_set_c0_guest_status(cop0, ST0_EXL);
2597*4882a593Smuzhiyun 
2598*4882a593Smuzhiyun 		if (cause & CAUSEF_BD)
2599*4882a593Smuzhiyun 			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2600*4882a593Smuzhiyun 		else
2601*4882a593Smuzhiyun 			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2602*4882a593Smuzhiyun 
2603*4882a593Smuzhiyun 		kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
2604*4882a593Smuzhiyun 			  arch->pc);
2605*4882a593Smuzhiyun 	} else {
2606*4882a593Smuzhiyun 		kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
2607*4882a593Smuzhiyun 			  arch->pc);
2608*4882a593Smuzhiyun 	}
2609*4882a593Smuzhiyun 
2610*4882a593Smuzhiyun 	arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2611*4882a593Smuzhiyun 
2612*4882a593Smuzhiyun 	kvm_change_c0_guest_cause(cop0, (0xff),
2613*4882a593Smuzhiyun 				  (EXCCODE_MOD << CAUSEB_EXCCODE));
2614*4882a593Smuzhiyun 
2615*4882a593Smuzhiyun 	/* setup badvaddr, context and entryhi registers for the guest */
2616*4882a593Smuzhiyun 	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2617*4882a593Smuzhiyun 	/* XXXKYMA: is the context register used by linux??? */
2618*4882a593Smuzhiyun 	kvm_write_c0_guest_entryhi(cop0, entryhi);
2619*4882a593Smuzhiyun 
2620*4882a593Smuzhiyun 	return EMULATE_DONE;
2621*4882a593Smuzhiyun }
2622*4882a593Smuzhiyun 
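/*
 * Deliver a Coprocessor Unusable exception for coprocessor 1 (the FPU):
 * ExcCode is set to CpU and Cause.CE to 1, so the guest kernel can handle it
 * itself (e.g. by lazily restoring the FPU context).
 */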
2623*4882a593Smuzhiyun enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
2624*4882a593Smuzhiyun 					       u32 *opc,
2625*4882a593Smuzhiyun 					       struct kvm_vcpu *vcpu)
2626*4882a593Smuzhiyun {
2627*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
2628*4882a593Smuzhiyun 	struct kvm_vcpu_arch *arch = &vcpu->arch;
2629*4882a593Smuzhiyun 
2630*4882a593Smuzhiyun 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2631*4882a593Smuzhiyun 		/* save old pc */
2632*4882a593Smuzhiyun 		kvm_write_c0_guest_epc(cop0, arch->pc);
2633*4882a593Smuzhiyun 		kvm_set_c0_guest_status(cop0, ST0_EXL);
2634*4882a593Smuzhiyun 
2635*4882a593Smuzhiyun 		if (cause & CAUSEF_BD)
2636*4882a593Smuzhiyun 			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2637*4882a593Smuzhiyun 		else
2638*4882a593Smuzhiyun 			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2639*4882a593Smuzhiyun 
2640*4882a593Smuzhiyun 	}
2641*4882a593Smuzhiyun 
2642*4882a593Smuzhiyun 	arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2643*4882a593Smuzhiyun 
2644*4882a593Smuzhiyun 	kvm_change_c0_guest_cause(cop0, (0xff),
2645*4882a593Smuzhiyun 				  (EXCCODE_CPU << CAUSEB_EXCCODE));
2646*4882a593Smuzhiyun 	kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
2647*4882a593Smuzhiyun 
2648*4882a593Smuzhiyun 	return EMULATE_DONE;
2649*4882a593Smuzhiyun }
2650*4882a593Smuzhiyun 
2651*4882a593Smuzhiyun enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
2652*4882a593Smuzhiyun 					      u32 *opc,
2653*4882a593Smuzhiyun 					      struct kvm_vcpu *vcpu)
2654*4882a593Smuzhiyun {
2655*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
2656*4882a593Smuzhiyun 	struct kvm_vcpu_arch *arch = &vcpu->arch;
2657*4882a593Smuzhiyun 	enum emulation_result er = EMULATE_DONE;
2658*4882a593Smuzhiyun 
2659*4882a593Smuzhiyun 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2660*4882a593Smuzhiyun 		/* save old pc */
2661*4882a593Smuzhiyun 		kvm_write_c0_guest_epc(cop0, arch->pc);
2662*4882a593Smuzhiyun 		kvm_set_c0_guest_status(cop0, ST0_EXL);
2663*4882a593Smuzhiyun 
2664*4882a593Smuzhiyun 		if (cause & CAUSEF_BD)
2665*4882a593Smuzhiyun 			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2666*4882a593Smuzhiyun 		else
2667*4882a593Smuzhiyun 			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2668*4882a593Smuzhiyun 
2669*4882a593Smuzhiyun 		kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
2670*4882a593Smuzhiyun 
2671*4882a593Smuzhiyun 		kvm_change_c0_guest_cause(cop0, (0xff),
2672*4882a593Smuzhiyun 					  (EXCCODE_RI << CAUSEB_EXCCODE));
2673*4882a593Smuzhiyun 
2674*4882a593Smuzhiyun 		/* Set PC to the exception entry point */
2675*4882a593Smuzhiyun 		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2676*4882a593Smuzhiyun 
2677*4882a593Smuzhiyun 	} else {
2678*4882a593Smuzhiyun 		kvm_err("Trying to deliver RI when EXL is already set\n");
2679*4882a593Smuzhiyun 		er = EMULATE_FAIL;
2680*4882a593Smuzhiyun 	}
2681*4882a593Smuzhiyun 
2682*4882a593Smuzhiyun 	return er;
2683*4882a593Smuzhiyun }
2684*4882a593Smuzhiyun 
2685*4882a593Smuzhiyun enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
2686*4882a593Smuzhiyun 					      u32 *opc,
2687*4882a593Smuzhiyun 					      struct kvm_vcpu *vcpu)
2688*4882a593Smuzhiyun {
2689*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
2690*4882a593Smuzhiyun 	struct kvm_vcpu_arch *arch = &vcpu->arch;
2691*4882a593Smuzhiyun 	enum emulation_result er = EMULATE_DONE;
2692*4882a593Smuzhiyun 
2693*4882a593Smuzhiyun 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2694*4882a593Smuzhiyun 		/* save old pc */
2695*4882a593Smuzhiyun 		kvm_write_c0_guest_epc(cop0, arch->pc);
2696*4882a593Smuzhiyun 		kvm_set_c0_guest_status(cop0, ST0_EXL);
2697*4882a593Smuzhiyun 
2698*4882a593Smuzhiyun 		if (cause & CAUSEF_BD)
2699*4882a593Smuzhiyun 			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2700*4882a593Smuzhiyun 		else
2701*4882a593Smuzhiyun 			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2702*4882a593Smuzhiyun 
2703*4882a593Smuzhiyun 		kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
2704*4882a593Smuzhiyun 
2705*4882a593Smuzhiyun 		kvm_change_c0_guest_cause(cop0, (0xff),
2706*4882a593Smuzhiyun 					  (EXCCODE_BP << CAUSEB_EXCCODE));
2707*4882a593Smuzhiyun 
2708*4882a593Smuzhiyun 		/* Set PC to the exception entry point */
2709*4882a593Smuzhiyun 		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2710*4882a593Smuzhiyun 
2711*4882a593Smuzhiyun 	} else {
2712*4882a593Smuzhiyun 		kvm_err("Trying to deliver BP when EXL is already set\n");
2713*4882a593Smuzhiyun 		er = EMULATE_FAIL;
2714*4882a593Smuzhiyun 	}
2715*4882a593Smuzhiyun 
2716*4882a593Smuzhiyun 	return er;
2717*4882a593Smuzhiyun }
2718*4882a593Smuzhiyun 
2719*4882a593Smuzhiyun enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
2720*4882a593Smuzhiyun 						u32 *opc,
2721*4882a593Smuzhiyun 						struct kvm_vcpu *vcpu)
2722*4882a593Smuzhiyun {
2723*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
2724*4882a593Smuzhiyun 	struct kvm_vcpu_arch *arch = &vcpu->arch;
2725*4882a593Smuzhiyun 	enum emulation_result er = EMULATE_DONE;
2726*4882a593Smuzhiyun 
2727*4882a593Smuzhiyun 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2728*4882a593Smuzhiyun 		/* save old pc */
2729*4882a593Smuzhiyun 		kvm_write_c0_guest_epc(cop0, arch->pc);
2730*4882a593Smuzhiyun 		kvm_set_c0_guest_status(cop0, ST0_EXL);
2731*4882a593Smuzhiyun 
2732*4882a593Smuzhiyun 		if (cause & CAUSEF_BD)
2733*4882a593Smuzhiyun 			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2734*4882a593Smuzhiyun 		else
2735*4882a593Smuzhiyun 			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2736*4882a593Smuzhiyun 
2737*4882a593Smuzhiyun 		kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
2738*4882a593Smuzhiyun 
2739*4882a593Smuzhiyun 		kvm_change_c0_guest_cause(cop0, (0xff),
2740*4882a593Smuzhiyun 					  (EXCCODE_TR << CAUSEB_EXCCODE));
2741*4882a593Smuzhiyun 
2742*4882a593Smuzhiyun 		/* Set PC to the exception entry point */
2743*4882a593Smuzhiyun 		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2744*4882a593Smuzhiyun 
2745*4882a593Smuzhiyun 	} else {
2746*4882a593Smuzhiyun 		kvm_err("Trying to deliver TRAP when EXL is already set\n");
2747*4882a593Smuzhiyun 		er = EMULATE_FAIL;
2748*4882a593Smuzhiyun 	}
2749*4882a593Smuzhiyun 
2750*4882a593Smuzhiyun 	return er;
2751*4882a593Smuzhiyun }
2752*4882a593Smuzhiyun 
2753*4882a593Smuzhiyun enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
2754*4882a593Smuzhiyun 						  u32 *opc,
2755*4882a593Smuzhiyun 						  struct kvm_vcpu *vcpu)
2756*4882a593Smuzhiyun {
2757*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
2758*4882a593Smuzhiyun 	struct kvm_vcpu_arch *arch = &vcpu->arch;
2759*4882a593Smuzhiyun 	enum emulation_result er = EMULATE_DONE;
2760*4882a593Smuzhiyun 
2761*4882a593Smuzhiyun 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2762*4882a593Smuzhiyun 		/* save old pc */
2763*4882a593Smuzhiyun 		kvm_write_c0_guest_epc(cop0, arch->pc);
2764*4882a593Smuzhiyun 		kvm_set_c0_guest_status(cop0, ST0_EXL);
2765*4882a593Smuzhiyun 
2766*4882a593Smuzhiyun 		if (cause & CAUSEF_BD)
2767*4882a593Smuzhiyun 			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2768*4882a593Smuzhiyun 		else
2769*4882a593Smuzhiyun 			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2770*4882a593Smuzhiyun 
2771*4882a593Smuzhiyun 		kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
2772*4882a593Smuzhiyun 
2773*4882a593Smuzhiyun 		kvm_change_c0_guest_cause(cop0, (0xff),
2774*4882a593Smuzhiyun 					  (EXCCODE_MSAFPE << CAUSEB_EXCCODE));
2775*4882a593Smuzhiyun 
2776*4882a593Smuzhiyun 		/* Set PC to the exception entry point */
2777*4882a593Smuzhiyun 		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2778*4882a593Smuzhiyun 
2779*4882a593Smuzhiyun 	} else {
2780*4882a593Smuzhiyun 		kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
2781*4882a593Smuzhiyun 		er = EMULATE_FAIL;
2782*4882a593Smuzhiyun 	}
2783*4882a593Smuzhiyun 
2784*4882a593Smuzhiyun 	return er;
2785*4882a593Smuzhiyun }
2786*4882a593Smuzhiyun 
2787*4882a593Smuzhiyun enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
2788*4882a593Smuzhiyun 					       u32 *opc,
2789*4882a593Smuzhiyun 					       struct kvm_vcpu *vcpu)
2790*4882a593Smuzhiyun {
2791*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
2792*4882a593Smuzhiyun 	struct kvm_vcpu_arch *arch = &vcpu->arch;
2793*4882a593Smuzhiyun 	enum emulation_result er = EMULATE_DONE;
2794*4882a593Smuzhiyun 
2795*4882a593Smuzhiyun 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2796*4882a593Smuzhiyun 		/* save old pc */
2797*4882a593Smuzhiyun 		kvm_write_c0_guest_epc(cop0, arch->pc);
2798*4882a593Smuzhiyun 		kvm_set_c0_guest_status(cop0, ST0_EXL);
2799*4882a593Smuzhiyun 
2800*4882a593Smuzhiyun 		if (cause & CAUSEF_BD)
2801*4882a593Smuzhiyun 			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2802*4882a593Smuzhiyun 		else
2803*4882a593Smuzhiyun 			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2804*4882a593Smuzhiyun 
2805*4882a593Smuzhiyun 		kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
2806*4882a593Smuzhiyun 
2807*4882a593Smuzhiyun 		kvm_change_c0_guest_cause(cop0, (0xff),
2808*4882a593Smuzhiyun 					  (EXCCODE_FPE << CAUSEB_EXCCODE));
2809*4882a593Smuzhiyun 
2810*4882a593Smuzhiyun 		/* Set PC to the exception entry point */
2811*4882a593Smuzhiyun 		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2812*4882a593Smuzhiyun 
2813*4882a593Smuzhiyun 	} else {
2814*4882a593Smuzhiyun 		kvm_err("Trying to deliver FPE when EXL is already set\n");
2815*4882a593Smuzhiyun 		er = EMULATE_FAIL;
2816*4882a593Smuzhiyun 	}
2817*4882a593Smuzhiyun 
2818*4882a593Smuzhiyun 	return er;
2819*4882a593Smuzhiyun }
2820*4882a593Smuzhiyun 
2821*4882a593Smuzhiyun enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
2822*4882a593Smuzhiyun 						  u32 *opc,
2823*4882a593Smuzhiyun 						  struct kvm_vcpu *vcpu)
2824*4882a593Smuzhiyun {
2825*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
2826*4882a593Smuzhiyun 	struct kvm_vcpu_arch *arch = &vcpu->arch;
2827*4882a593Smuzhiyun 	enum emulation_result er = EMULATE_DONE;
2828*4882a593Smuzhiyun 
2829*4882a593Smuzhiyun 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2830*4882a593Smuzhiyun 		/* save old pc */
2831*4882a593Smuzhiyun 		kvm_write_c0_guest_epc(cop0, arch->pc);
2832*4882a593Smuzhiyun 		kvm_set_c0_guest_status(cop0, ST0_EXL);
2833*4882a593Smuzhiyun 
2834*4882a593Smuzhiyun 		if (cause & CAUSEF_BD)
2835*4882a593Smuzhiyun 			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2836*4882a593Smuzhiyun 		else
2837*4882a593Smuzhiyun 			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2838*4882a593Smuzhiyun 
2839*4882a593Smuzhiyun 		kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
2840*4882a593Smuzhiyun 
2841*4882a593Smuzhiyun 		kvm_change_c0_guest_cause(cop0, (0xff),
2842*4882a593Smuzhiyun 					  (EXCCODE_MSADIS << CAUSEB_EXCCODE));
2843*4882a593Smuzhiyun 
2844*4882a593Smuzhiyun 		/* Set PC to the exception entry point */
2845*4882a593Smuzhiyun 		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2846*4882a593Smuzhiyun 
2847*4882a593Smuzhiyun 	} else {
2848*4882a593Smuzhiyun 		kvm_err("Trying to deliver MSADIS when EXL is already set\n");
2849*4882a593Smuzhiyun 		er = EMULATE_FAIL;
2850*4882a593Smuzhiyun 	}
2851*4882a593Smuzhiyun 
2852*4882a593Smuzhiyun 	return er;
2853*4882a593Smuzhiyun }
2854*4882a593Smuzhiyun 
2855*4882a593Smuzhiyun enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
2856*4882a593Smuzhiyun 					 struct kvm_vcpu *vcpu)
2857*4882a593Smuzhiyun {
2858*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
2859*4882a593Smuzhiyun 	struct kvm_vcpu_arch *arch = &vcpu->arch;
2860*4882a593Smuzhiyun 	enum emulation_result er = EMULATE_DONE;
2861*4882a593Smuzhiyun 	unsigned long curr_pc;
2862*4882a593Smuzhiyun 	union mips_instruction inst;
2863*4882a593Smuzhiyun 	int err;
2864*4882a593Smuzhiyun 
2865*4882a593Smuzhiyun 	/*
2866*4882a593Smuzhiyun 	 * Update PC and hold onto current PC in case there is
2867*4882a593Smuzhiyun 	 * an error and we want to roll back the PC
2868*4882a593Smuzhiyun 	 */
2869*4882a593Smuzhiyun 	curr_pc = vcpu->arch.pc;
2870*4882a593Smuzhiyun 	er = update_pc(vcpu, cause);
2871*4882a593Smuzhiyun 	if (er == EMULATE_FAIL)
2872*4882a593Smuzhiyun 		return er;
2873*4882a593Smuzhiyun 
2874*4882a593Smuzhiyun 	/* Fetch the instruction. */
2875*4882a593Smuzhiyun 	if (cause & CAUSEF_BD)
2876*4882a593Smuzhiyun 		opc += 1;
2877*4882a593Smuzhiyun 	err = kvm_get_badinstr(opc, vcpu, &inst.word);
2878*4882a593Smuzhiyun 	if (err) {
2879*4882a593Smuzhiyun 		kvm_err("%s: Cannot get inst @ %p (%d)\n", __func__, opc, err);
2880*4882a593Smuzhiyun 		return EMULATE_FAIL;
2881*4882a593Smuzhiyun 	}
2882*4882a593Smuzhiyun 
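	/*
	 * The only RI cause emulated here is RDHWR (SPECIAL3 opcode, rdhwr
	 * function, rs zero and only the low sel bits of the re field set);
	 * any other reserved instruction is reflected back to the guest as an
	 * RI exception.
	 */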
2883*4882a593Smuzhiyun 	if (inst.r_format.opcode == spec3_op &&
2884*4882a593Smuzhiyun 	    inst.r_format.func == rdhwr_op &&
2885*4882a593Smuzhiyun 	    inst.r_format.rs == 0 &&
2886*4882a593Smuzhiyun 	    (inst.r_format.re >> 3) == 0) {
2887*4882a593Smuzhiyun 		int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
2888*4882a593Smuzhiyun 		int rd = inst.r_format.rd;
2889*4882a593Smuzhiyun 		int rt = inst.r_format.rt;
2890*4882a593Smuzhiyun 		int sel = inst.r_format.re & 0x7;
2891*4882a593Smuzhiyun 
2892*4882a593Smuzhiyun 		/* If usermode, check RDHWR rd is allowed by guest HWREna */
2893*4882a593Smuzhiyun 		if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
2894*4882a593Smuzhiyun 			kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
2895*4882a593Smuzhiyun 				  rd, opc);
2896*4882a593Smuzhiyun 			goto emulate_ri;
2897*4882a593Smuzhiyun 		}
2898*4882a593Smuzhiyun 		switch (rd) {
2899*4882a593Smuzhiyun 		case MIPS_HWR_CPUNUM:		/* CPU number */
2900*4882a593Smuzhiyun 			arch->gprs[rt] = vcpu->vcpu_id;
2901*4882a593Smuzhiyun 			break;
2902*4882a593Smuzhiyun 		case MIPS_HWR_SYNCISTEP:	/* SYNCI length */
2903*4882a593Smuzhiyun 			arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
2904*4882a593Smuzhiyun 					     current_cpu_data.icache.linesz);
2905*4882a593Smuzhiyun 			break;
2906*4882a593Smuzhiyun 		case MIPS_HWR_CC:		/* Read count register */
2907*4882a593Smuzhiyun 			arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu);
2908*4882a593Smuzhiyun 			break;
2909*4882a593Smuzhiyun 		case MIPS_HWR_CCRES:		/* Count register resolution */
2910*4882a593Smuzhiyun 			switch (current_cpu_data.cputype) {
2911*4882a593Smuzhiyun 			case CPU_20KC:
2912*4882a593Smuzhiyun 			case CPU_25KF:
2913*4882a593Smuzhiyun 				arch->gprs[rt] = 1;
2914*4882a593Smuzhiyun 				break;
2915*4882a593Smuzhiyun 			default:
2916*4882a593Smuzhiyun 				arch->gprs[rt] = 2;
2917*4882a593Smuzhiyun 			}
2918*4882a593Smuzhiyun 			break;
2919*4882a593Smuzhiyun 		case MIPS_HWR_ULR:		/* Read UserLocal register */
2920*4882a593Smuzhiyun 			arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
2921*4882a593Smuzhiyun 			break;
2922*4882a593Smuzhiyun 
2923*4882a593Smuzhiyun 		default:
2924*4882a593Smuzhiyun 			kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
2925*4882a593Smuzhiyun 			goto emulate_ri;
2926*4882a593Smuzhiyun 		}
2927*4882a593Smuzhiyun 
2928*4882a593Smuzhiyun 		trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel),
2929*4882a593Smuzhiyun 			      vcpu->arch.gprs[rt]);
2930*4882a593Smuzhiyun 	} else {
2931*4882a593Smuzhiyun 		kvm_debug("Emulate RI not supported @ %p: %#x\n",
2932*4882a593Smuzhiyun 			  opc, inst.word);
2933*4882a593Smuzhiyun 		goto emulate_ri;
2934*4882a593Smuzhiyun 	}
2935*4882a593Smuzhiyun 
2936*4882a593Smuzhiyun 	return EMULATE_DONE;
2937*4882a593Smuzhiyun 
2938*4882a593Smuzhiyun emulate_ri:
2939*4882a593Smuzhiyun 	/*
2940*4882a593Smuzhiyun 	 * Roll back the PC (if in a branch delay slot the PC already points
2941*4882a593Smuzhiyun 	 * to the branch target) and pass the RI exception to the guest OS.
2942*4882a593Smuzhiyun 	 */
2943*4882a593Smuzhiyun 	vcpu->arch.pc = curr_pc;
2944*4882a593Smuzhiyun 	return kvm_mips_emulate_ri_exc(cause, opc, vcpu);
2945*4882a593Smuzhiyun }
2946*4882a593Smuzhiyun 
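/**
 * kvm_mips_complete_mmio_load() - Finish an MMIO load after return from userspace.
 * @vcpu:	Virtual CPU which performed the MMIO load.
 *
 * Copy the data returned in vcpu->run->mmio.data into the destination GPR
 * recorded when the load was emulated, and restore the resume PC saved at
 * that time.
 */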
2947*4882a593Smuzhiyun enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu)
2948*4882a593Smuzhiyun {
2949*4882a593Smuzhiyun 	struct kvm_run *run = vcpu->run;
2950*4882a593Smuzhiyun 	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
2951*4882a593Smuzhiyun 	enum emulation_result er = EMULATE_DONE;
2952*4882a593Smuzhiyun 
2953*4882a593Smuzhiyun 	if (run->mmio.len > sizeof(*gpr)) {
2954*4882a593Smuzhiyun 		kvm_err("Bad MMIO length: %d\n", run->mmio.len);
2955*4882a593Smuzhiyun 		er = EMULATE_FAIL;
2956*4882a593Smuzhiyun 		goto done;
2957*4882a593Smuzhiyun 	}
2958*4882a593Smuzhiyun 
2959*4882a593Smuzhiyun 	/* Restore saved resume PC */
2960*4882a593Smuzhiyun 	vcpu->arch.pc = vcpu->arch.io_pc;
2961*4882a593Smuzhiyun 
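	/*
	 * vcpu->mmio_needed, set up by the load emulation, selects how the
	 * returned bytes are placed in the destination register: the simple
	 * cases copy the whole (sign- or zero-extended) value, while the
	 * remaining cases merge only a subset of byte lanes into the previous
	 * register contents on behalf of the unaligned LWL/LWR/LDL/LDR loads.
	 */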
2962*4882a593Smuzhiyun 	switch (run->mmio.len) {
2963*4882a593Smuzhiyun 	case 8:
2964*4882a593Smuzhiyun 		switch (vcpu->mmio_needed) {
2965*4882a593Smuzhiyun 		case 11:
2966*4882a593Smuzhiyun 			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff) |
2967*4882a593Smuzhiyun 				(((*(s64 *)run->mmio.data) & 0xff) << 56);
2968*4882a593Smuzhiyun 			break;
2969*4882a593Smuzhiyun 		case 12:
2970*4882a593Smuzhiyun 			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff) |
2971*4882a593Smuzhiyun 				(((*(s64 *)run->mmio.data) & 0xffff) << 48);
2972*4882a593Smuzhiyun 			break;
2973*4882a593Smuzhiyun 		case 13:
2974*4882a593Smuzhiyun 			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff) |
2975*4882a593Smuzhiyun 				(((*(s64 *)run->mmio.data) & 0xffffff) << 40);
2976*4882a593Smuzhiyun 			break;
2977*4882a593Smuzhiyun 		case 14:
2978*4882a593Smuzhiyun 			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff) |
2979*4882a593Smuzhiyun 				(((*(s64 *)run->mmio.data) & 0xffffffff) << 32);
2980*4882a593Smuzhiyun 			break;
2981*4882a593Smuzhiyun 		case 15:
2982*4882a593Smuzhiyun 			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
2983*4882a593Smuzhiyun 				(((*(s64 *)run->mmio.data) & 0xffffffffff) << 24);
2984*4882a593Smuzhiyun 			break;
2985*4882a593Smuzhiyun 		case 16:
2986*4882a593Smuzhiyun 			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
2987*4882a593Smuzhiyun 				(((*(s64 *)run->mmio.data) & 0xffffffffffff) << 16);
2988*4882a593Smuzhiyun 			break;
2989*4882a593Smuzhiyun 		case 17:
2990*4882a593Smuzhiyun 			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
2991*4882a593Smuzhiyun 				(((*(s64 *)run->mmio.data) & 0xffffffffffffff) << 8);
2992*4882a593Smuzhiyun 			break;
2993*4882a593Smuzhiyun 		case 18:
2994*4882a593Smuzhiyun 		case 19:
2995*4882a593Smuzhiyun 			*gpr = *(s64 *)run->mmio.data;
2996*4882a593Smuzhiyun 			break;
2997*4882a593Smuzhiyun 		case 20:
2998*4882a593Smuzhiyun 			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff00000000000000) |
2999*4882a593Smuzhiyun 				((((*(s64 *)run->mmio.data)) >> 8) & 0xffffffffffffff);
3000*4882a593Smuzhiyun 			break;
3001*4882a593Smuzhiyun 		case 21:
3002*4882a593Smuzhiyun 			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff000000000000) |
3003*4882a593Smuzhiyun 				((((*(s64 *)run->mmio.data)) >> 16) & 0xffffffffffff);
3004*4882a593Smuzhiyun 			break;
3005*4882a593Smuzhiyun 		case 22:
3006*4882a593Smuzhiyun 			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff0000000000) |
3007*4882a593Smuzhiyun 				((((*(s64 *)run->mmio.data)) >> 24) & 0xffffffffff);
3008*4882a593Smuzhiyun 			break;
3009*4882a593Smuzhiyun 		case 23:
3010*4882a593Smuzhiyun 			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff00000000) |
3011*4882a593Smuzhiyun 				((((*(s64 *)run->mmio.data)) >> 32) & 0xffffffff);
3012*4882a593Smuzhiyun 			break;
3013*4882a593Smuzhiyun 		case 24:
3014*4882a593Smuzhiyun 			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff000000) |
3015*4882a593Smuzhiyun 				((((*(s64 *)run->mmio.data)) >> 40) & 0xffffff);
3016*4882a593Smuzhiyun 			break;
3017*4882a593Smuzhiyun 		case 25:
3018*4882a593Smuzhiyun 			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff0000) |
3019*4882a593Smuzhiyun 				((((*(s64 *)run->mmio.data)) >> 48) & 0xffff);
3020*4882a593Smuzhiyun 			break;
3021*4882a593Smuzhiyun 		case 26:
3022*4882a593Smuzhiyun 			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff00) |
3023*4882a593Smuzhiyun 				((((*(s64 *)run->mmio.data)) >> 56) & 0xff);
3024*4882a593Smuzhiyun 			break;
3025*4882a593Smuzhiyun 		default:
3026*4882a593Smuzhiyun 			*gpr = *(s64 *)run->mmio.data;
3027*4882a593Smuzhiyun 		}
3028*4882a593Smuzhiyun 		break;
3029*4882a593Smuzhiyun 
3030*4882a593Smuzhiyun 	case 4:
3031*4882a593Smuzhiyun 		switch (vcpu->mmio_needed) {
3032*4882a593Smuzhiyun 		case 1:
3033*4882a593Smuzhiyun 			*gpr = *(u32 *)run->mmio.data;
3034*4882a593Smuzhiyun 			break;
3035*4882a593Smuzhiyun 		case 2:
3036*4882a593Smuzhiyun 			*gpr = *(s32 *)run->mmio.data;
3037*4882a593Smuzhiyun 			break;
3038*4882a593Smuzhiyun 		case 3:
3039*4882a593Smuzhiyun 			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
3040*4882a593Smuzhiyun 				(((*(s32 *)run->mmio.data) & 0xff) << 24);
3041*4882a593Smuzhiyun 			break;
3042*4882a593Smuzhiyun 		case 4:
3043*4882a593Smuzhiyun 			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
3044*4882a593Smuzhiyun 				(((*(s32 *)run->mmio.data) & 0xffff) << 16);
3045*4882a593Smuzhiyun 			break;
3046*4882a593Smuzhiyun 		case 5:
3047*4882a593Smuzhiyun 			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
3048*4882a593Smuzhiyun 				(((*(s32 *)run->mmio.data) & 0xffffff) << 8);
3049*4882a593Smuzhiyun 			break;
3050*4882a593Smuzhiyun 		case 6:
3051*4882a593Smuzhiyun 		case 7:
3052*4882a593Smuzhiyun 			*gpr = *(s32 *)run->mmio.data;
3053*4882a593Smuzhiyun 			break;
3054*4882a593Smuzhiyun 		case 8:
3055*4882a593Smuzhiyun 			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff000000) |
3056*4882a593Smuzhiyun 				((((*(s32 *)run->mmio.data)) >> 8) & 0xffffff);
3057*4882a593Smuzhiyun 			break;
3058*4882a593Smuzhiyun 		case 9:
3059*4882a593Smuzhiyun 			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff0000) |
3060*4882a593Smuzhiyun 				((((*(s32 *)run->mmio.data)) >> 16) & 0xffff);
3061*4882a593Smuzhiyun 			break;
3062*4882a593Smuzhiyun 		case 10:
3063*4882a593Smuzhiyun 			*gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff00) |
3064*4882a593Smuzhiyun 				((((*(s32 *)run->mmio.data)) >> 24) & 0xff);
3065*4882a593Smuzhiyun 			break;
3066*4882a593Smuzhiyun 		default:
3067*4882a593Smuzhiyun 			*gpr = *(s32 *)run->mmio.data;
3068*4882a593Smuzhiyun 		}
3069*4882a593Smuzhiyun 		break;
3070*4882a593Smuzhiyun 
3071*4882a593Smuzhiyun 	case 2:
3072*4882a593Smuzhiyun 		if (vcpu->mmio_needed == 1)
3073*4882a593Smuzhiyun 			*gpr = *(u16 *)run->mmio.data;
3074*4882a593Smuzhiyun 		else
3075*4882a593Smuzhiyun 			*gpr = *(s16 *)run->mmio.data;
3076*4882a593Smuzhiyun 
3077*4882a593Smuzhiyun 		break;
3078*4882a593Smuzhiyun 	case 1:
3079*4882a593Smuzhiyun 		if (vcpu->mmio_needed == 1)
3080*4882a593Smuzhiyun 			*gpr = *(u8 *)run->mmio.data;
3081*4882a593Smuzhiyun 		else
3082*4882a593Smuzhiyun 			*gpr = *(s8 *)run->mmio.data;
3083*4882a593Smuzhiyun 		break;
3084*4882a593Smuzhiyun 	}
3085*4882a593Smuzhiyun 
3086*4882a593Smuzhiyun done:
3087*4882a593Smuzhiyun 	return er;
3088*4882a593Smuzhiyun }
3089*4882a593Smuzhiyun 
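/*
 * Deliver the exception code extracted from the host Cause value to the
 * guest, using the same EPC/EXL/Cause.BD bookkeeping as the specific
 * injectors above, and also record the faulting address in
 * Guest.CP0_BadVAddr.
 */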
3090*4882a593Smuzhiyun static enum emulation_result kvm_mips_emulate_exc(u32 cause,
3091*4882a593Smuzhiyun 						  u32 *opc,
3092*4882a593Smuzhiyun 						  struct kvm_vcpu *vcpu)
3093*4882a593Smuzhiyun {
3094*4882a593Smuzhiyun 	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
3095*4882a593Smuzhiyun 	struct mips_coproc *cop0 = vcpu->arch.cop0;
3096*4882a593Smuzhiyun 	struct kvm_vcpu_arch *arch = &vcpu->arch;
3097*4882a593Smuzhiyun 	enum emulation_result er = EMULATE_DONE;
3098*4882a593Smuzhiyun 
3099*4882a593Smuzhiyun 	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
3100*4882a593Smuzhiyun 		/* save old pc */
3101*4882a593Smuzhiyun 		kvm_write_c0_guest_epc(cop0, arch->pc);
3102*4882a593Smuzhiyun 		kvm_set_c0_guest_status(cop0, ST0_EXL);
3103*4882a593Smuzhiyun 
3104*4882a593Smuzhiyun 		if (cause & CAUSEF_BD)
3105*4882a593Smuzhiyun 			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
3106*4882a593Smuzhiyun 		else
3107*4882a593Smuzhiyun 			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
3108*4882a593Smuzhiyun 
3109*4882a593Smuzhiyun 		kvm_change_c0_guest_cause(cop0, (0xff),
3110*4882a593Smuzhiyun 					  (exccode << CAUSEB_EXCCODE));
3111*4882a593Smuzhiyun 
3112*4882a593Smuzhiyun 		/* Set PC to the exception entry point */
3113*4882a593Smuzhiyun 		arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
3114*4882a593Smuzhiyun 		kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
3115*4882a593Smuzhiyun 
3116*4882a593Smuzhiyun 		kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
3117*4882a593Smuzhiyun 			  exccode, kvm_read_c0_guest_epc(cop0),
3118*4882a593Smuzhiyun 			  kvm_read_c0_guest_badvaddr(cop0));
3119*4882a593Smuzhiyun 	} else {
3120*4882a593Smuzhiyun 		kvm_err("Trying to deliver EXC when EXL is already set\n");
3121*4882a593Smuzhiyun 		er = EMULATE_FAIL;
3122*4882a593Smuzhiyun 	}
3123*4882a593Smuzhiyun 
3124*4882a593Smuzhiyun 	return er;
3125*4882a593Smuzhiyun }
3126*4882a593Smuzhiyun 
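/*
 * Check whether an exception taken while the guest was in user mode is one
 * that guest user code is allowed to cause. Accesses to guest kernel
 * addresses are converted to address error exceptions, accesses to the
 * commpage are converted back to TLB exceptions, and anything not permitted
 * is reflected to the guest via kvm_mips_emulate_exc().
 */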
3127*4882a593Smuzhiyun enum emulation_result kvm_mips_check_privilege(u32 cause,
3128*4882a593Smuzhiyun 					       u32 *opc,
3129*4882a593Smuzhiyun 					       struct kvm_vcpu *vcpu)
3130*4882a593Smuzhiyun {
3131*4882a593Smuzhiyun 	enum emulation_result er = EMULATE_DONE;
3132*4882a593Smuzhiyun 	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
3133*4882a593Smuzhiyun 	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
3134*4882a593Smuzhiyun 
3135*4882a593Smuzhiyun 	int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
3136*4882a593Smuzhiyun 
3137*4882a593Smuzhiyun 	if (usermode) {
3138*4882a593Smuzhiyun 		switch (exccode) {
3139*4882a593Smuzhiyun 		case EXCCODE_INT:
3140*4882a593Smuzhiyun 		case EXCCODE_SYS:
3141*4882a593Smuzhiyun 		case EXCCODE_BP:
3142*4882a593Smuzhiyun 		case EXCCODE_RI:
3143*4882a593Smuzhiyun 		case EXCCODE_TR:
3144*4882a593Smuzhiyun 		case EXCCODE_MSAFPE:
3145*4882a593Smuzhiyun 		case EXCCODE_FPE:
3146*4882a593Smuzhiyun 		case EXCCODE_MSADIS:
3147*4882a593Smuzhiyun 			break;
3148*4882a593Smuzhiyun 
3149*4882a593Smuzhiyun 		case EXCCODE_CPU:
3150*4882a593Smuzhiyun 			if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
3151*4882a593Smuzhiyun 				er = EMULATE_PRIV_FAIL;
3152*4882a593Smuzhiyun 			break;
3153*4882a593Smuzhiyun 
3154*4882a593Smuzhiyun 		case EXCCODE_MOD:
3155*4882a593Smuzhiyun 			break;
3156*4882a593Smuzhiyun 
3157*4882a593Smuzhiyun 		case EXCCODE_TLBL:
3158*4882a593Smuzhiyun 			/*
3159*4882a593Smuzhiyun 			 * If we are accessing Guest kernel space, then send an
3160*4882a593Smuzhiyun 			 * address error exception to the guest
3161*4882a593Smuzhiyun 			 */
3162*4882a593Smuzhiyun 			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
3163*4882a593Smuzhiyun 				kvm_debug("%s: LD MISS @ %#lx\n", __func__,
3164*4882a593Smuzhiyun 					  badvaddr);
3165*4882a593Smuzhiyun 				cause &= ~0xff;
3166*4882a593Smuzhiyun 				cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE);
3167*4882a593Smuzhiyun 				er = EMULATE_PRIV_FAIL;
3168*4882a593Smuzhiyun 			}
3169*4882a593Smuzhiyun 			break;
3170*4882a593Smuzhiyun 
3171*4882a593Smuzhiyun 		case EXCCODE_TLBS:
3172*4882a593Smuzhiyun 			/*
3173*4882a593Smuzhiyun 			 * If we are accessing Guest kernel space, then send an
3174*4882a593Smuzhiyun 			 * address error exception to the guest
3175*4882a593Smuzhiyun 			 */
3176*4882a593Smuzhiyun 			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
3177*4882a593Smuzhiyun 				kvm_debug("%s: ST MISS @ %#lx\n", __func__,
3178*4882a593Smuzhiyun 					  badvaddr);
3179*4882a593Smuzhiyun 				cause &= ~0xff;
3180*4882a593Smuzhiyun 				cause |= (EXCCODE_ADES << CAUSEB_EXCCODE);
3181*4882a593Smuzhiyun 				er = EMULATE_PRIV_FAIL;
3182*4882a593Smuzhiyun 			}
3183*4882a593Smuzhiyun 			break;
3184*4882a593Smuzhiyun 
3185*4882a593Smuzhiyun 		case EXCCODE_ADES:
3186*4882a593Smuzhiyun 			kvm_debug("%s: address error ST @ %#lx\n", __func__,
3187*4882a593Smuzhiyun 				  badvaddr);
3188*4882a593Smuzhiyun 			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
3189*4882a593Smuzhiyun 				cause &= ~0xff;
3190*4882a593Smuzhiyun 				cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE);
3191*4882a593Smuzhiyun 			}
3192*4882a593Smuzhiyun 			er = EMULATE_PRIV_FAIL;
3193*4882a593Smuzhiyun 			break;
3194*4882a593Smuzhiyun 		case EXCCODE_ADEL:
3195*4882a593Smuzhiyun 			kvm_debug("%s: address error LD @ %#lx\n", __func__,
3196*4882a593Smuzhiyun 				  badvaddr);
3197*4882a593Smuzhiyun 			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
3198*4882a593Smuzhiyun 				cause &= ~0xff;
3199*4882a593Smuzhiyun 				cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE);
3200*4882a593Smuzhiyun 			}
3201*4882a593Smuzhiyun 			er = EMULATE_PRIV_FAIL;
3202*4882a593Smuzhiyun 			break;
3203*4882a593Smuzhiyun 		default:
3204*4882a593Smuzhiyun 			er = EMULATE_PRIV_FAIL;
3205*4882a593Smuzhiyun 			break;
3206*4882a593Smuzhiyun 		}
3207*4882a593Smuzhiyun 	}
3208*4882a593Smuzhiyun 
3209*4882a593Smuzhiyun 	if (er == EMULATE_PRIV_FAIL)
3210*4882a593Smuzhiyun 		kvm_mips_emulate_exc(cause, opc, vcpu);
3211*4882a593Smuzhiyun 
3212*4882a593Smuzhiyun 	return er;
3213*4882a593Smuzhiyun }
3214*4882a593Smuzhiyun 
3215*4882a593Smuzhiyun /*
3216*4882a593Smuzhiyun  * User Address (UA) fault; this can happen if
3217*4882a593Smuzhiyun  * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
3218*4882a593Smuzhiyun  *     case we pass on the fault to the guest kernel and let it handle it.
3219*4882a593Smuzhiyun  * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
3220*4882a593Smuzhiyun  *     case we inject the TLB from the Guest TLB into the shadow host TLB
3221*4882a593Smuzhiyun  */
3222*4882a593Smuzhiyun enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
3223*4882a593Smuzhiyun 					      u32 *opc,
3224*4882a593Smuzhiyun 					      struct kvm_vcpu *vcpu,
3225*4882a593Smuzhiyun 					      bool write_fault)
3226*4882a593Smuzhiyun {
3227*4882a593Smuzhiyun 	enum emulation_result er = EMULATE_DONE;
3228*4882a593Smuzhiyun 	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
3229*4882a593Smuzhiyun 	unsigned long va = vcpu->arch.host_cp0_badvaddr;
3230*4882a593Smuzhiyun 	int index;
3231*4882a593Smuzhiyun 
3232*4882a593Smuzhiyun 	kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n",
3233*4882a593Smuzhiyun 		  vcpu->arch.host_cp0_badvaddr);
3234*4882a593Smuzhiyun 
3235*4882a593Smuzhiyun 	/*
3236*4882a593Smuzhiyun 	 * KVM would not have got the exception if this entry was valid in the
3237*4882a593Smuzhiyun 	 * shadow host TLB. Check the Guest TLB, if the entry is not there then
3238*4882a593Smuzhiyun 	 * send the guest an exception. The guest exc handler should then inject
3239*4882a593Smuzhiyun 	 * an entry into the guest TLB.
3240*4882a593Smuzhiyun 	 */
3241*4882a593Smuzhiyun 	index = kvm_mips_guest_tlb_lookup(vcpu,
3242*4882a593Smuzhiyun 		      (va & VPN2_MASK) |
3243*4882a593Smuzhiyun 		      (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
3244*4882a593Smuzhiyun 		       KVM_ENTRYHI_ASID));
3245*4882a593Smuzhiyun 	if (index < 0) {
3246*4882a593Smuzhiyun 		if (exccode == EXCCODE_TLBL) {
3247*4882a593Smuzhiyun 			er = kvm_mips_emulate_tlbmiss_ld(cause, opc, vcpu);
3248*4882a593Smuzhiyun 		} else if (exccode == EXCCODE_TLBS) {
3249*4882a593Smuzhiyun 			er = kvm_mips_emulate_tlbmiss_st(cause, opc, vcpu);
3250*4882a593Smuzhiyun 		} else {
3251*4882a593Smuzhiyun 			kvm_err("%s: invalid exc code: %d\n", __func__,
3252*4882a593Smuzhiyun 				exccode);
3253*4882a593Smuzhiyun 			er = EMULATE_FAIL;
3254*4882a593Smuzhiyun 		}
3255*4882a593Smuzhiyun 	} else {
3256*4882a593Smuzhiyun 		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
3257*4882a593Smuzhiyun 
3258*4882a593Smuzhiyun 		/*
3259*4882a593Smuzhiyun 		 * Check if the entry is valid, if not then setup a TLB invalid
3260*4882a593Smuzhiyun 		 * exception to the guest
3261*4882a593Smuzhiyun 		 */
3262*4882a593Smuzhiyun 		if (!TLB_IS_VALID(*tlb, va)) {
3263*4882a593Smuzhiyun 			if (exccode == EXCCODE_TLBL) {
3264*4882a593Smuzhiyun 				er = kvm_mips_emulate_tlbinv_ld(cause, opc,
3265*4882a593Smuzhiyun 								vcpu);
3266*4882a593Smuzhiyun 			} else if (exccode == EXCCODE_TLBS) {
3267*4882a593Smuzhiyun 				er = kvm_mips_emulate_tlbinv_st(cause, opc,
3268*4882a593Smuzhiyun 								vcpu);
3269*4882a593Smuzhiyun 			} else {
3270*4882a593Smuzhiyun 				kvm_err("%s: invalid exc code: %d\n", __func__,
3271*4882a593Smuzhiyun 					exccode);
3272*4882a593Smuzhiyun 				er = EMULATE_FAIL;
3273*4882a593Smuzhiyun 			}
3274*4882a593Smuzhiyun 		} else {
3275*4882a593Smuzhiyun 			kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
3276*4882a593Smuzhiyun 				  tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]);
3277*4882a593Smuzhiyun 			/*
3278*4882a593Smuzhiyun 			 * OK we have a Guest TLB entry, now inject it into the
3279*4882a593Smuzhiyun 			 * shadow host TLB
3280*4882a593Smuzhiyun 			 */
3281*4882a593Smuzhiyun 			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, va,
3282*4882a593Smuzhiyun 								 write_fault)) {
3283*4882a593Smuzhiyun 				kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
3284*4882a593Smuzhiyun 					__func__, va, index, vcpu,
3285*4882a593Smuzhiyun 					read_c0_entryhi());
3286*4882a593Smuzhiyun 				er = EMULATE_FAIL;
3287*4882a593Smuzhiyun 			}
3288*4882a593Smuzhiyun 		}
3289*4882a593Smuzhiyun 	}
3290*4882a593Smuzhiyun 
3291*4882a593Smuzhiyun 	return er;
3292*4882a593Smuzhiyun }
3293