/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Generation of main entry point for the guest, exception handling.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 *
 * Copyright (C) 2016 Imagination Technologies Ltd.
 */

#include <linux/kvm_host.h>
#include <linux/log2.h>
#include <asm/mmu_context.h>
#include <asm/msa.h>
#include <asm/setup.h>
#include <asm/tlbex.h>
#include <asm/uasm.h>

/* Register names */
#define ZERO		0
#define AT		1
#define V0		2
#define V1		3
#define A0		4
#define A1		5

#if _MIPS_SIM == _MIPS_SIM_ABI32
#define T0		8
#define T1		9
#define T2		10
#define T3		11
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */

#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
#define T0		12
#define T1		13
#define T2		14
#define T3		15
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
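
/*
 * Note: the temporaries live at different GPR numbers per ABI. Under o32,
 * $8..$11 are t0..t3; under n32/n64 those registers become the extra
 * argument registers a4..a7, so t0..t3 (historically t4..t7) sit at
 * $12..$15 instead.
 */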

#define S0		16
#define S1		17
#define T9		25
#define K0		26
#define K1		27
#define GP		28
#define SP		29
#define RA		31

/* Some CP0 registers */
#define C0_PWBASE	5, 5
#define C0_HWRENA	7, 0
#define C0_BADVADDR	8, 0
#define C0_BADINSTR	8, 1
#define C0_BADINSTRP	8, 2
#define C0_PGD		9, 7
#define C0_ENTRYHI	10, 0
#define C0_GUESTCTL1	10, 4
#define C0_STATUS	12, 0
#define C0_GUESTCTL0	12, 6
#define C0_CAUSE	13, 0
#define C0_EPC		14, 0
#define C0_EBASE	15, 1
#define C0_CONFIG5	16, 5
#define C0_DDATA_LO	28, 3
#define C0_ERROREPC	30, 0

#define CALLFRAME_SIZ   32

#ifdef CONFIG_64BIT
#define ST0_KX_IF_64	ST0_KX
#else
#define ST0_KX_IF_64	0
#endif

static unsigned int scratch_vcpu[2] = { C0_DDATA_LO };
static unsigned int scratch_tmp[2] = { C0_ERROREPC };
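
/*
 * Each scratch_* array is a { CP0 register, select } pair naming the CP0
 * register used to stash a value across guest entry/exit: scratch_vcpu holds
 * the struct kvm_vcpu pointer and scratch_tmp parks the guest k1 register.
 * The defaults above (DDATA_LO and ErrorEPC) are overridden with KScratch
 * registers by kvm_mips_entry_setup() when the core provides them.
 */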

enum label_id {
	label_fpu_1 = 1,
	label_msa_1,
	label_return_to_host,
	label_kernel_asid,
	label_exit_common,
};

UASM_L_LA(_fpu_1)
UASM_L_LA(_msa_1)
UASM_L_LA(_return_to_host)
UASM_L_LA(_kernel_asid)
UASM_L_LA(_exit_common)

static void *kvm_mips_build_enter_guest(void *addr);
static void *kvm_mips_build_ret_from_exit(void *addr);
static void *kvm_mips_build_ret_to_guest(void *addr);
static void *kvm_mips_build_ret_to_host(void *addr);
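
/*
 * The builders below chain together to form the generated code: the
 * vcpu_run entry emitted by kvm_mips_build_vcpu_run() falls through into
 * kvm_mips_build_enter_guest(); the exception vectors emitted by
 * kvm_mips_build_exception() branch to the common handler emitted by
 * kvm_mips_build_exit(); and that handler falls through into
 * kvm_mips_build_ret_from_exit(), which emits both the return-to-guest and
 * return-to-host tails. How the emitted code is installed (presumably the
 * vcpu creation path in arch/mips/kvm/mips.c copies it into the guest
 * exception base area) is an assumption not visible from this file.
 */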

/*
 * The version of this function in tlbex.c uses current_cpu_type(), but for KVM
 * we assume symmetry.
 */
static int c0_kscratch(void)
{
	switch (boot_cpu_type()) {
	case CPU_XLP:
	case CPU_XLR:
		return 22;
	default:
		return 31;
	}
}

/**
 * kvm_mips_entry_setup() - Perform global setup for entry code.
 *
 * Perform global setup for entry code, such as choosing a scratch register.
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
int kvm_mips_entry_setup(void)
{
	/*
	 * We prefer to use KScratchN registers if they are available over the
	 * defaults above, which may not work on all cores.
	 */
	unsigned int kscratch_mask = cpu_data[0].kscratch_mask;

	if (pgd_reg != -1)
		kscratch_mask &= ~BIT(pgd_reg);

	/* Pick a scratch register for storing VCPU */
	if (kscratch_mask) {
		scratch_vcpu[0] = c0_kscratch();
		scratch_vcpu[1] = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~BIT(scratch_vcpu[1]);
	}

	/* Pick a scratch register to use as a temp for saving state */
	if (kscratch_mask) {
		scratch_tmp[0] = c0_kscratch();
		scratch_tmp[1] = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~BIT(scratch_tmp[1]);
	}

	return 0;
}
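
/*
 * Illustrative example (values assumed, not taken from real hardware): with
 * kscratch_mask == 0x0c, i.e. KScratch selects 2 and 3 available after
 * excluding pgd_reg, ffs(0x0c) - 1 == 2 picks scratch_vcpu = { 31, 2 }, the
 * mask shrinks to 0x08, and scratch_tmp then becomes { 31, 3 }.
 */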

static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
					unsigned int frame)
{
	/* Save the VCPU scratch register value in cp0_epc of the stack frame */
	UASM_i_MFC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
	UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);

	/* Save the temp scratch register value in cp0_cause of stack frame */
	if (scratch_tmp[0] == c0_kscratch()) {
		UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
		UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
	}
}

static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
					   unsigned int frame)
{
	/*
	 * Restore host scratch register values saved by
	 * kvm_mips_build_save_scratch().
	 */
	UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
	UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);

	if (scratch_tmp[0] == c0_kscratch()) {
		UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
		UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
	}
}

/**
 * build_set_exc_base() - Assemble code to write exception base address.
 * @p:		Code buffer pointer.
 * @reg:	Source register (generated code may set WG bit in @reg).
 *
 * Assemble code to modify the exception base address in the EBase register,
 * using the appropriately sized access and setting the WG bit if necessary.
 */
static inline void build_set_exc_base(u32 **p, unsigned int reg)
{
	if (cpu_has_ebase_wg) {
		/* Set WG so that all the bits get written */
		uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
		UASM_i_MTC0(p, reg, C0_EBASE);
	} else {
		uasm_i_mtc0(p, reg, C0_EBASE);
	}
}
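
/*
 * Background (a summary of the MIPS architecture rather than anything taken
 * from this file): without the write gate, writes to EBase cannot change
 * bits 31:30, pinning the vector base inside (c)kseg0. Setting WG unlocks
 * those bits, and the UASM_i_MTC0 variant expands to dmtc0 on 64-bit
 * kernels so the full-width base address gets written.
 */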

/**
 * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
 * @addr:	Address to start writing code.
 *
 * Assemble the start of the vcpu_run function to run a guest VCPU. The function
 * conforms to the following prototype:
 *
 * int vcpu_run(struct kvm_vcpu *vcpu);
 *
 * The exit from the guest and return to the caller is handled by the code
 * generated by kvm_mips_build_ret_to_host().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_vcpu_run(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/*
	 * A0: vcpu
	 */

	/* k0/k1 not being used in host kernel context */
	UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs));
	for (i = 16; i < 32; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}

	/* Save host status */
	uasm_i_mfc0(&p, V0, C0_STATUS);
	UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);

	/* Save scratch registers, will be used to store pointer to vcpu etc */
	kvm_mips_build_save_scratch(&p, V1, K1);

	/* VCPU scratch register has pointer to vcpu */
	UASM_i_MTC0(&p, A0, scratch_vcpu[0], scratch_vcpu[1]);

	/* Offset into vcpu->arch */
	UASM_i_ADDIU(&p, K1, A0, offsetof(struct kvm_vcpu, arch));

	/*
	 * Save the host stack to VCPU, used for exception processing
	 * when we exit from the Guest
	 */
	UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Save the kernel gp as well */
	UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/*
	 * Setup status register for running the guest in UM, interrupts
	 * are disabled
	 */
	UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	/* load up the new EBASE */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
	build_set_exc_base(&p, K0);

	/*
	 * Now that the new EBASE has been loaded, unset BEV, set
	 * interrupt mask as it was but make sure that timer interrupts
	 * are enabled
	 */
	uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
	uasm_i_andi(&p, V0, V0, ST0_IM);
	uasm_i_or(&p, K0, K0, V0);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}
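
/*
 * A minimal usage sketch, assuming the conventional wiring (the buffer name
 * and surrounding steps are hypothetical; upstream this is done by the vcpu
 * creation path, not by this file):
 *
 *	u32 *p = entry_area;
 *	int (*vcpu_run)(struct kvm_vcpu *vcpu) = (void *)p;
 *
 *	p = kvm_mips_build_vcpu_run(p);
 *	// ...flush the icache over the emitted range...
 *	ret = vcpu_run(vcpu);	// returns via kvm_mips_build_ret_to_host()
 */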

/**
 * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to resume guest execution. This code is common between the
 * initial entry into the guest from the host, and returning from the exit
 * handler back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_enter_guest(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label __maybe_unused *l = labels;
	struct uasm_reloc __maybe_unused *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Set Guest EPC */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
	UASM_i_MTC0(&p, T0, C0_EPC);

#ifdef CONFIG_KVM_MIPS_VZ
	/* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
	if (cpu_has_ldpte)
		UASM_i_MFC0(&p, K0, C0_PWBASE);
	else
		UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1);

	/*
	 * Set up KVM GPA pgd.
	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
	 * - write mm->pgd into CP0_PWBase
	 *
	 * We keep S0 pointing at struct kvm so we can load the ASID below.
	 */
	UASM_i_LW(&p, S0, (int)offsetof(struct kvm_vcpu, kvm) -
			  (int)offsetof(struct kvm_vcpu, arch), K1);
	UASM_i_LW(&p, A0, offsetof(struct kvm, arch.gpa_mm.pgd), S0);
	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
	uasm_i_jalr(&p, RA, T9);
	/* delay slot */
	if (cpu_has_htw)
		UASM_i_MTC0(&p, A0, C0_PWBASE);
	else
		uasm_i_nop(&p);

	/* Set GM bit to setup eret to VZ guest context */
	uasm_i_addiu(&p, V1, ZERO, 1);
	uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
	uasm_i_ins(&p, K0, V1, MIPS_GCTL0_GM_SHIFT, 1);
	uasm_i_mtc0(&p, K0, C0_GUESTCTL0);

	if (cpu_has_guestid) {
		/*
		 * Set root mode GuestID, so that root TLB refill handler can
		 * use the correct GuestID in the root TLB.
		 */

		/* Get current GuestID */
		uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
		/* Set GuestCtl1.RID = GuestCtl1.ID */
		uasm_i_ext(&p, T1, T0, MIPS_GCTL1_ID_SHIFT,
			   MIPS_GCTL1_ID_WIDTH);
		uasm_i_ins(&p, T0, T1, MIPS_GCTL1_RID_SHIFT,
			   MIPS_GCTL1_RID_WIDTH);
		uasm_i_mtc0(&p, T0, C0_GUESTCTL1);

		/* GuestID handles dealiasing so we don't need to touch ASID */
		goto skip_asid_restore;
	}

	/* Root ASID Dealias (RAD) */

	/* Save host ASID */
	UASM_i_MFC0(&p, K0, C0_ENTRYHI);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
		  K1);

	/* Set the root ASID for the Guest */
	UASM_i_ADDIU(&p, T1, S0,
		     offsetof(struct kvm, arch.gpa_mm.context.asid));
#else
	/* Set the ASID for the Guest Kernel or User */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
	UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
		  T0);
	uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
	uasm_i_xori(&p, T0, T0, KSU_USER);
	uasm_il_bnez(&p, &r, T0, label_kernel_asid);
	 UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
					   guest_kernel_mm.context.asid));
	/* else user */
	UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
					  guest_user_mm.context.asid));
	uasm_l_kernel_asid(&l, p);
#endif

	/* t1: contains the base of the ASID array, need to get the cpu id  */
	/* smp_processor_id */
	uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
	/* index the ASID array */
	uasm_i_sll(&p, T2, T2, ilog2(sizeof(long)));
	UASM_i_ADDU(&p, T3, T1, T2);
	UASM_i_LW(&p, K0, 0, T3);
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
	/*
	 * reuse ASID array offset
	 * cpuinfo_mips is a multiple of sizeof(long)
	 */
	uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/sizeof(long));
	uasm_i_mul(&p, T2, T2, T3);

	UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
	UASM_i_ADDU(&p, AT, AT, T2);
	UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
	uasm_i_and(&p, K0, K0, T2);
#else
	uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
#endif

#ifndef CONFIG_KVM_MIPS_VZ
	/*
	 * Set up KVM T&E GVA pgd.
	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
	 * - but skips write into CP0_PWBase for now
	 */
	UASM_i_LW(&p, A0, (int)offsetof(struct mm_struct, pgd) -
			  (int)offsetof(struct mm_struct, context.asid), T1);

	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
	uasm_i_jalr(&p, RA, T9);
	 uasm_i_mtc0(&p, K0, C0_ENTRYHI);
#else
	/* Set up KVM VZ root ASID (!guestid) */
	uasm_i_mtc0(&p, K0, C0_ENTRYHI);
skip_asid_restore:
#endif
	uasm_i_ehb(&p);

	/* Disable RDHWR access */
	uasm_i_mtc0(&p, ZERO, C0_HWRENA);

	/* load the guest context from VCPU and return */
	for (i = 1; i < 32; ++i) {
		/* Guest k0/k1 loaded later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

#ifndef CONFIG_CPU_MIPSR6
	/* Restore hi/lo */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
	uasm_i_mthi(&p, K0);

	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
	uasm_i_mtlo(&p, K0);
#endif

	/* Restore the guest's k0/k1 registers */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Jump to guest */
	uasm_i_eret(&p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}
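
/*
 * At the eret above, Status.EXL is set and EPC holds the guest PC, so the
 * guest resumes at vcpu->arch.pc with EXL cleared: in user mode for trap and
 * emulate, or in guest context under VZ since GuestCtl0.GM was set earlier.
 * Every guest GPR has been reloaded by then, with k0/k1 done last because K1
 * held the context pointer until the final two loads.
 */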

/**
 * kvm_mips_build_tlb_refill_exception() - Assemble TLB refill handler.
 * @addr:	Address to start writing code.
 * @handler:	Address of common handler (within range of @addr).
 *
 * Assemble TLB refill exception fast path handler for guest execution.
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
#ifndef CONFIG_CPU_LOONGSON64
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
#endif

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Save guest k1 into scratch register */
	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);

	/* Get the VCPU pointer from the VCPU scratch register */
	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Save guest k0 into VCPU structure */
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);

	/*
	 * Some of the common tlbex code uses current_cpu_type(). For KVM we
	 * assume symmetry and just disable preemption to silence the warning.
	 */
	preempt_disable();

#ifdef CONFIG_CPU_LOONGSON64
	UASM_i_MFC0(&p, K1, C0_PGD);
	uasm_i_lddir(&p, K0, K1, 3);  /* global page dir */
#ifndef __PAGETABLE_PMD_FOLDED
	uasm_i_lddir(&p, K1, K0, 1);  /* middle page dir */
#endif
	uasm_i_ldpte(&p, K1, 0);      /* even */
	uasm_i_ldpte(&p, K1, 1);      /* odd */
	uasm_i_tlbwr(&p);
#else
	/*
	 * Now for the actual refill bit. A lot of this can be common with the
	 * Linux TLB refill handler, however we don't need to handle so many
	 * cases. We only need to handle user mode refills, and user mode runs
	 * with 32-bit addressing.
	 *
	 * Therefore the branch to label_vmalloc generated by build_get_pmde64()
	 * that isn't resolved should never actually get taken and is harmless
	 * to leave in place for now.
	 */

#ifdef CONFIG_64BIT
	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

	/* we don't support huge pages yet */

	build_get_ptep(&p, K0, K1);
	build_update_entries(&p, K0, K1);
	build_tlb_write_entry(&p, &l, &r, tlb_random);
#endif

	preempt_enable();

	/* Get the VCPU pointer from the VCPU scratch register again */
	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Restore the guest's k0/k1 registers */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
	uasm_i_ehb(&p);
	UASM_i_MFC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);

	/* Jump to guest */
	uasm_i_eret(&p);

	return p;
}
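
/*
 * Note that this fast path deliberately clobbers nothing but k0/k1: guest k1
 * is parked in the scratch_tmp CP0 register and guest k0 in the VCPU
 * structure, the refill sequence is generated using only K0/K1 as
 * temporaries, and both are restored before the eret, so no other guest
 * state needs saving here.
 */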

/**
 * kvm_mips_build_exception() - Assemble first level guest exception handler.
 * @addr:	Address to start writing code.
 * @handler:	Address of common handler (within range of @addr).
 *
 * Assemble exception vector code for guest execution. The generated vector will
 * branch to the common exception handler generated by kvm_mips_build_exit().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exception(void *addr, void *handler)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Save guest k1 into scratch register */
	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);

	/* Get the VCPU pointer from the VCPU scratch register */
	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/* Save guest k0 into VCPU structure */
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);

	/* Branch to the common handler */
	uasm_il_b(&p, &r, label_exit_common);
	 uasm_i_nop(&p);

	uasm_l_exit_common(&l, handler);
	uasm_resolve_relocs(relocs, labels);

	return p;
}
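
/*
 * The stub above is only a handful of instructions, so it fits comfortably
 * in an exception vector slot (0x80 bytes apart in the usual MIPS vector
 * layout, which is an assumption here rather than something this function
 * enforces); the heavy lifting is shared by branching every vector to the
 * single common handler at @handler.
 */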

/**
 * kvm_mips_build_exit() - Assemble common guest exit handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the generic guest exit handling code. This is called by the
 * exception vectors (generated by kvm_mips_build_exception()), and calls
 * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
 * depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exit(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[3];
	struct uasm_reloc relocs[3];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/*
	 * Generic Guest exception handler. We end up here when the guest
	 * does something that causes a trap to kernel mode.
	 *
	 * Both k0/k1 registers will have already been saved (k0 into the vcpu
	 * structure, and k1 into the scratch_tmp register).
	 *
	 * The k1 register will already contain the kvm_vcpu_arch pointer.
	 */

	/* Start saving Guest context to VCPU */
	for (i = 0; i < 32; ++i) {
		/* Guest k0/k1 saved later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

#ifndef CONFIG_CPU_MIPSR6
	/* We need to save hi/lo and restore them on the way out */
	uasm_i_mfhi(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);

	uasm_i_mflo(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
#endif

	/* Finally save guest k1 to VCPU */
	uasm_i_ehb(&p);
	UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Now that context has been saved, we can use other registers */

	/* Restore vcpu */
	UASM_i_MFC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);

	/*
	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
	 * the exception
	 */
	UASM_i_MFC0(&p, K0, C0_EPC);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);

	UASM_i_MFC0(&p, K0, C0_BADVADDR);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
		  K1);

	uasm_i_mfc0(&p, K0, C0_CAUSE);
	uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);

	if (cpu_has_badinstr) {
		uasm_i_mfc0(&p, K0, C0_BADINSTR);
		uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
					   host_cp0_badinstr), K1);
	}

	if (cpu_has_badinstrp) {
		uasm_i_mfc0(&p, K0, C0_BADINSTRP);
		uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
					   host_cp0_badinstrp), K1);
	}

	/* Now restore the host state just enough to run the handlers */

	/* Switch EBASE to the one used by Linux */
	/* load up the host EBASE */
	uasm_i_mfc0(&p, V0, C0_STATUS);

	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V0, AT);

	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	UASM_i_LA_mostly(&p, K0, (long)&ebase);
	UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
	build_set_exc_base(&p, K0);

	if (raw_cpu_has_fpu) {
		/*
		 * If FPU is enabled, save FCR31 and clear it so that later
		 * ctc1's don't trigger FPE for pending exceptions.
		 */
		uasm_i_lui(&p, AT, ST0_CU1 >> 16);
		uasm_i_and(&p, V1, V0, AT);
		uasm_il_beqz(&p, &r, V1, label_fpu_1);
		 uasm_i_nop(&p);
		uasm_i_cfc1(&p, T0, 31);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
			  K1);
		uasm_i_ctc1(&p, ZERO, 31);
		uasm_l_fpu_1(&l, p);
	}

	if (cpu_has_msa) {
		/*
		 * If MSA is enabled, save MSACSR and clear it so that later
		 * instructions don't trigger MSAFPE for pending exceptions.
		 */
		uasm_i_mfc0(&p, T0, C0_CONFIG5);
		uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
		uasm_il_beqz(&p, &r, T0, label_msa_1);
		 uasm_i_nop(&p);
		uasm_i_cfcmsa(&p, T0, MSA_CSR);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
			  K1);
		uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
		uasm_l_msa_1(&l, p);
	}

#ifdef CONFIG_KVM_MIPS_VZ
	/* Restore host ASID */
	if (!cpu_has_guestid) {
		UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
			  K1);
		UASM_i_MTC0(&p, K0, C0_ENTRYHI);
	}

	/*
	 * Set up normal Linux process pgd.
	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
	 * - write mm->pgd into CP0_PWBase
	 */
	UASM_i_LW(&p, A0,
		  offsetof(struct kvm_vcpu_arch, host_pgd), K1);
	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
	uasm_i_jalr(&p, RA, T9);
	/* delay slot */
	if (cpu_has_htw)
		UASM_i_MTC0(&p, A0, C0_PWBASE);
	else
		uasm_i_nop(&p);

	/* Clear GM bit so we don't enter guest mode when EXL is cleared */
	uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
	uasm_i_ins(&p, K0, ZERO, MIPS_GCTL0_GM_SHIFT, 1);
	uasm_i_mtc0(&p, K0, C0_GUESTCTL0);

	/* Save GuestCtl0 so we can access GExcCode after CPU migration */
	uasm_i_sw(&p, K0,
		  offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), K1);

	if (cpu_has_guestid) {
		/*
		 * Clear root mode GuestID, so that root TLB operations use the
		 * root GuestID in the root TLB.
		 */
		uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
		/* Set GuestCtl1.RID = MIPS_GCTL1_ROOT_GUESTID (i.e. 0) */
		uasm_i_ins(&p, T0, ZERO, MIPS_GCTL1_RID_SHIFT,
			   MIPS_GCTL1_RID_WIDTH);
		uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
	}
#endif

	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
	uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
	uasm_i_and(&p, V0, V0, AT);
	uasm_i_lui(&p, AT, ST0_CU0 >> 16);
	uasm_i_or(&p, V0, V0, AT);
#ifdef CONFIG_64BIT
	uasm_i_ori(&p, V0, V0, ST0_SX | ST0_UX);
#endif
	uasm_i_mtc0(&p, V0, C0_STATUS);
	uasm_i_ehb(&p);

	/* Load up host GP */
	UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/* Need a stack before we can jump to "C" */
	UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Saved host state */
	UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));

	/*
	 * XXXKYMA do we need to load the host ASID, maybe not because the
	 * kernel entries are marked GLOBAL, need to verify
	 */

	/* Restore host scratch registers, as we'll have clobbered them */
	kvm_mips_build_restore_scratch(&p, K0, SP);

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Jump to handler */
	/*
	 * XXXKYMA: not sure if this is safe, how large is the stack??
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * with this in the kernel
	 */
	uasm_i_move(&p, A0, S0);
	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
	uasm_i_jalr(&p, RA, T9);
	 UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);

	uasm_resolve_relocs(relocs, labels);

	p = kvm_mips_build_ret_from_exit(p);

	return p;
}
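
/*
 * The ADDIU in the jalr delay slot above executes before
 * kvm_mips_handle_exit() starts, carving out CALLFRAME_SIZ bytes of outgoing
 * argument space as the MIPS calling conventions expect. It never needs to
 * be undone: the return-to-guest path reloads every register from the VCPU
 * context, and the return-to-host path reloads SP from the saved pt_regs.
 */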

/**
 * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle the return from kvm_mips_handle_exit(), either
 * resuming the guest or returning to the host depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_from_exit(void *addr)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Return from handler; make sure interrupts are disabled */
	uasm_i_di(&p, ZERO);
	uasm_i_ehb(&p);

	/*
	 * XXXKYMA: k0/k1 could have been blown away if we processed
	 * an exception while we were handling the exception from the
	 * guest, reload k1
	 */

	uasm_i_move(&p, K1, S0);
	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/*
	 * Check return value; it should tell us if we are returning to the
	 * host (handle I/O etc.) or resuming the guest
	 */
	uasm_i_andi(&p, T0, V0, RESUME_HOST);
	uasm_il_bnez(&p, &r, T0, label_return_to_host);
	 uasm_i_nop(&p);

	p = kvm_mips_build_ret_to_guest(p);

	uasm_l_return_to_host(&l, p);
	p = kvm_mips_build_ret_to_host(p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_guest(void *addr)
{
	u32 *p = addr;

	/* Put the saved pointer to vcpu (s0) back into the scratch register */
	UASM_i_MTC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);

	/* Load up the Guest EBASE to minimize the window where BEV is set */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);

	/* Switch EBASE back to the one used by KVM */
	uasm_i_mfc0(&p, V1, C0_STATUS);
	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V1, AT);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);
	build_set_exc_base(&p, T0);

	/* Setup status register for running guest in UM */
	uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
	UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
	uasm_i_and(&p, V1, V1, AT);
	uasm_i_mtc0(&p, V1, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}

/**
 * kvm_mips_build_ret_to_host() - Assemble code to return to the host.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
 * function generated by kvm_mips_build_vcpu_run().
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_host(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/* EBASE is already pointing to Linux */
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
	UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs));

	/*
	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
	 * to recover the err code
	 */
	uasm_i_sra(&p, K0, V0, 2);
	uasm_i_move(&p, V0, K0);

	/* Load context saved on the host stack */
	for (i = 16; i < 31; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Restore RA, which is the address we will return to */
	UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
	uasm_i_jr(&p, RA);
	 uasm_i_nop(&p);

	return p;
}