/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/asm-compat.h>

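/*
 * FUNC() resolves C symbol names for direct branches: the 64-bit ELFv1
 * ABI branches to the dot-symbol (code entry point), while ELFv2 and
 * 32-bit use the plain name.  GET_SHADOW_VCPU locates the shadow vcpu:
 * on 64-bit it is embedded in the PACA (r13), on 32-bit a pointer to it
 * is kept in the current thread struct (r2).
 */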
#if defined(CONFIG_PPC_BOOK3S_64)
#ifdef PPC64_ELF_ABI_v2
#define FUNC(name) 		name
#else
#define FUNC(name) 		GLUE(.,name)
#endif
#define GET_SHADOW_VCPU(reg)    addi	reg, r13, PACA_SVCPU

#elif defined(CONFIG_PPC_BOOK3S_32)
#define FUNC(name)		name
#define GET_SHADOW_VCPU(reg)	lwz     reg, (THREAD + THREAD_KVM_SVCPU)(r2)

#endif /* CONFIG_PPC_BOOK3S_64 */

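/* Load the guest's non-volatile GPRs (r14 - r31) from the vcpu struct */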
#define VCPU_LOAD_NVGPRS(vcpu) \
	PPC_LL	r14, VCPU_GPR(R14)(vcpu); \
	PPC_LL	r15, VCPU_GPR(R15)(vcpu); \
	PPC_LL	r16, VCPU_GPR(R16)(vcpu); \
	PPC_LL	r17, VCPU_GPR(R17)(vcpu); \
	PPC_LL	r18, VCPU_GPR(R18)(vcpu); \
	PPC_LL	r19, VCPU_GPR(R19)(vcpu); \
	PPC_LL	r20, VCPU_GPR(R20)(vcpu); \
	PPC_LL	r21, VCPU_GPR(R21)(vcpu); \
	PPC_LL	r22, VCPU_GPR(R22)(vcpu); \
	PPC_LL	r23, VCPU_GPR(R23)(vcpu); \
	PPC_LL	r24, VCPU_GPR(R24)(vcpu); \
	PPC_LL	r25, VCPU_GPR(R25)(vcpu); \
	PPC_LL	r26, VCPU_GPR(R26)(vcpu); \
	PPC_LL	r27, VCPU_GPR(R27)(vcpu); \
	PPC_LL	r28, VCPU_GPR(R28)(vcpu); \
	PPC_LL	r29, VCPU_GPR(R29)(vcpu); \
	PPC_LL	r30, VCPU_GPR(R30)(vcpu); \
	PPC_LL	r31, VCPU_GPR(R31)(vcpu); \

/*****************************************************************************
 *                                                                           *
 *     Guest entry / exit code that is in kernel module memory (highmem)     *
 *                                                                           *
 ****************************************************************************/

/* Registers:
 *  r3: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)

kvm_start_entry:
	/* Write correct stack frame */
	mflr	r0
	PPC_STL	r0,PPC_LR_STKOFF(r1)

	/* Save host state to the stack */
	PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)

	/* Save r3 (vcpu) */
	SAVE_GPR(3, r1)

	/* Save non-volatile registers (r14 - r31) */
	SAVE_NVGPRS(r1)

	/* Save CR */
	mfcr	r14
	stw	r14, _CCR(r1)

	/* Save LR */
	PPC_STL	r0, _LINK(r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r3)

kvm_start_lightweight:
	/* Copy registers into shadow vcpu so we can access them in real mode */
	bl	FUNC(kvmppc_copy_to_svcpu)
	nop
	REST_GPR(3, r1)

#ifdef CONFIG_PPC_BOOK3S_64
	/* Get the dcbz32 flag */
	PPC_LL	r0, VCPU_HFLAGS(r3)
	rldicl	r0, r0, 0, 63		/* r0 &= 1 */
	stb	r0, HSTATE_RESTORE_HID5(r13)

	/* Load up guest SPRG3 value, since it's user readable */
	lbz	r4, VCPU_SHAREDBE(r3)
	cmpwi	r4, 0
	ld	r5, VCPU_SHARED(r3)
	beq	sprg3_little_endian
sprg3_big_endian:
#ifdef __BIG_ENDIAN__
	ld	r4, VCPU_SHARED_SPRG3(r5)
#else
	addi	r5, r5, VCPU_SHARED_SPRG3
	ldbrx	r4, 0, r5
#endif
	b	after_sprg3_load
sprg3_little_endian:
#ifdef __LITTLE_ENDIAN__
	ld	r4, VCPU_SHARED_SPRG3(r5)
#else
	addi	r5, r5, VCPU_SHARED_SPRG3
	ldbrx	r4, 0, r5
#endif

after_sprg3_load:
	mtspr	SPRN_SPRG3, r4
#endif /* CONFIG_PPC_BOOK3S_64 */

	PPC_LL	r4, VCPU_SHADOW_MSR(r3)	/* get shadow_msr */

	/* Jump to segment patching handler and into our guest */
	bl	FUNC(kvmppc_entry_trampoline)
	nop

/*
 * This is the handler in module memory. It gets jumped at from the
 * lowmem trampoline code, so it's basically the guest exit code.
 *
 */

	/*
	 * Register usage at this point:
	 *
	 * R1       = host R1
	 * R2       = host R2
	 * R12      = exit handler id
	 * R13      = PACA
	 * SVCPU.*  = guest *
	 * MSR.EE   = 1
	 *
	 */

	PPC_LL	r3, GPR3(r1)		/* vcpu pointer */

	/*
	 * kvmppc_copy_from_svcpu can clobber volatile registers, save
	 * the exit handler id to the vcpu and restore it from there later.
	 */
	stw	r12, VCPU_TRAP(r3)

	/* Transfer reg values from shadow vcpu back to vcpu struct */

	bl	FUNC(kvmppc_copy_from_svcpu)
	nop

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Reload kernel SPRG3 value.
	 * No need to save guest value as usermode can't modify SPRG3.
	 */
	ld	r3, PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE, r3
#endif /* CONFIG_PPC_BOOK3S_64 */

	/* R7 = vcpu */
	PPC_LL	r7, GPR3(r1)

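	/*
	 * Save the guest's non-volatile GPRs (r14 - r31) into the vcpu
	 * struct so the C exit handler can see (and modify) them.
	 */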
	PPC_STL	r14, VCPU_GPR(R14)(r7)
	PPC_STL	r15, VCPU_GPR(R15)(r7)
	PPC_STL	r16, VCPU_GPR(R16)(r7)
	PPC_STL	r17, VCPU_GPR(R17)(r7)
	PPC_STL	r18, VCPU_GPR(R18)(r7)
	PPC_STL	r19, VCPU_GPR(R19)(r7)
	PPC_STL	r20, VCPU_GPR(R20)(r7)
	PPC_STL	r21, VCPU_GPR(R21)(r7)
	PPC_STL	r22, VCPU_GPR(R22)(r7)
	PPC_STL	r23, VCPU_GPR(R23)(r7)
	PPC_STL	r24, VCPU_GPR(R24)(r7)
	PPC_STL	r25, VCPU_GPR(R25)(r7)
	PPC_STL	r26, VCPU_GPR(R26)(r7)
	PPC_STL	r27, VCPU_GPR(R27)(r7)
	PPC_STL	r28, VCPU_GPR(R28)(r7)
	PPC_STL	r29, VCPU_GPR(R29)(r7)
	PPC_STL	r30, VCPU_GPR(R30)(r7)
	PPC_STL	r31, VCPU_GPR(R31)(r7)

	/* Pass the exit number as 2nd argument to kvmppc_handle_exit_pr */
	lwz	r4, VCPU_TRAP(r7)

	/* Restore r3 (vcpu) */
	REST_GPR(3, r1)
	bl	FUNC(kvmppc_handle_exit_pr)

	/* If RESUME_GUEST, get back in the loop */
	cmpwi	r3, RESUME_GUEST
	beq	kvm_loop_lightweight

	cmpwi	r3, RESUME_GUEST_NV
	beq	kvm_loop_heavyweight

kvm_exit_loop:

	PPC_LL	r4, _LINK(r1)
	mtlr	r4

	lwz	r14, _CCR(r1)
	mtcr	r14

	/* Restore non-volatile host registers (r14 - r31) */
	REST_NVGPRS(r1)

	addi    r1, r1, SWITCH_FRAME_SIZE
	blr

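/*
 * RESUME_GUEST_NV: the exit handler may have changed non-volatile guest
 * state, so reload it from the vcpu before re-entering the guest.
 */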
kvm_loop_heavyweight:

	PPC_LL	r4, _LINK(r1)
	PPC_STL r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1)

	/* Load vcpu */
	REST_GPR(3, r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r3)

	/* Jump back into the beginning of this function */
	b	kvm_start_lightweight

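/*
 * RESUME_GUEST: the non-volatile registers still hold guest state, so
 * only the vcpu pointer needs to be reloaded.
 */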
kvm_loop_lightweight:

	/* We'll need the vcpu pointer */
	REST_GPR(3, r1)

	/* Jump back into the beginning of this function */
	b	kvm_start_lightweight