xref: /OK3568_Linux_fs/kernel/arch/arm64/kvm/hyp/nvhe/host.S (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 - Google Inc
 * Author: Andrew Scull <ascull@google.com>
 */

#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

	.text

SYM_FUNC_START(__host_exit)
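	/*
	 * On entry the host's original x0 and x1 have already been pushed to
	 * the stack by host_el1_sync_vect below; every other register still
	 * holds its host value. get_host_ctxt returns the per-CPU host
	 * kvm_cpu_context pointer in x0, using x1 only as a scratch register.
	 */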
	get_host_ctxt	x0, x1

	/* Store the host regs x2 and x3 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(2)]

	/* Retrieve the host regs x0-x1 from the stack */
	ldp	x2, x3, [sp], #16	// x0, x1

	/* Store the host regs x0-x1 and x4-x17 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x0, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x0, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x0, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x0, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x0, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x0, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x0, #CPU_XREG_OFFSET(16)]

	/* Store the host regs x18-x29, lr */
	save_callee_saved_regs x0

	/* Save the host context pointer in x29 across the function call */
	mov	x29, x0
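	/*
	 * handle_trap() is the C dispatcher in nvhe/hyp-main.c: it services
	 * the host's hcall (or forwards its SMC) using the saved context and
	 * may clobber any caller-saved register.
	 */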
	bl	handle_trap

	/* Restore host regs x0-x17 */
__host_enter_restore_full:
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]

	/* x0-x7 are used for panic arguments */
__host_enter_for_panic:
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	/* Restore host regs x18-x29, lr */
	restore_callee_saved_regs x29

	/* Do not touch any register after this! */
__host_enter_without_restoring:
	eret
	sb
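	/* The sb above is a speculation barrier: it stops straight-line speculation past the eret. */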
SYM_FUNC_END(__host_exit)

/*
 * void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
 */
SYM_FUNC_START(__host_enter)
	mov	x29, x0
	b	__host_enter_restore_full
SYM_FUNC_END(__host_enter)

/*
 * void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
 * 				  u64 elr, u64 par);
 */
SYM_FUNC_START(__hyp_do_panic)
	/* Prepare and exit to the host's panic function. */
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	hyp_kimg_va lr, x6
	msr	elr_el2, lr

	mov	x29, x0

	/* Load the format string into x0 and arguments into x1-7 */
	ldr	x0, =__hyp_panic_string
	hyp_kimg_va x0, x6

	/* Load the format arguments into x1-7. */
	mov	x6, x3
	get_vcpu_ptr x7, x3
	mrs	x3, esr_el2
	mrs	x4, far_el2
	mrs	x5, hpfar_el2
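	/*
	 * panic() is thus entered with x0 = __hyp_panic_string and
	 * x1-x7 = spsr, elr, esr, far, hpfar, par and the loaded vCPU
	 * pointer, matching the conversion specifiers in the string.
	 */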

	/* Enter the host, conditionally restoring the host context. */
	cbz	x29, __host_enter_without_restoring
	b	__host_enter_for_panic
SYM_FUNC_END(__hyp_do_panic)

.macro host_el1_sync_vect
	.align 7
.L__vect_start\@:
	stp	x0, x1, [sp, #-16]!
	mrs	x0, esr_el2
	ubfx	x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
	cmp	x0, #ESR_ELx_EC_HVC64
	b.ne	__host_exit

	ldp	x0, x1, [sp]		// Don't fixup the stack yet

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	__host_exit
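	/*
	 * x0 values below HVC_STUB_HCALL_NR identify hyp-stub calls (e.g.
	 * HVC_SOFT_RESTART); anything else is a regular host hcall and is
	 * handled via __host_exit/handle_trap instead.
	 */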

	add	sp, sp, #16
	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	hyp_pa	x5, x6
	br	x5
.L__vect_end\@:
.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
	.error "host_el1_sync_vect larger than vector entry"
.endif
.endm

.macro invalid_host_el2_vect
	.align 7
	/* If a guest is loaded, panic out of it. */
	stp	x0, x1, [sp, #-16]!
	get_loaded_vcpu x0, x1
	cbnz	x0, __guest_exit_panic
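	/* __guest_exit_panic lives in the shared hyp entry.S and exits the guest before panicking. */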
	add	sp, sp, #16

	/*
	 * The panic may not be clean if the exception is taken before the host
	 * context has been saved by __host_exit or after the hyp context has
	 * been partially clobbered by __host_enter.
	 */
	b	hyp_panic
.endm

.macro invalid_host_el1_vect
	.align 7
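	/*
	 * The host was executing normally, so its registers were never saved;
	 * pass a NULL host_ctxt so that __hyp_do_panic skips the restore.
	 */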
	mov	x0, xzr		/* host_ctxt = NULL */
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, par_el1
	b	__hyp_do_panic
.endm

/*
 * The host vector does not use an ESB instruction in order to avoid consuming
 * SErrors that should only be consumed by the host. Guest entry is deferred by
 * __guest_enter if there are any pending asynchronous exceptions so hyp will
 * always return to the host without having consumed host SErrors.
 *
 * CONFIG_KVM_INDIRECT_VECTORS is not applied to the host vectors because the
 * host knows about the EL2 vectors already, and there is no point in hiding
 * them.
 */
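/*
 * The table below must be 2KB-aligned (.align 11), as required for VBAR_EL2,
 * and each of the 16 entries is limited to 128 bytes (.align 7 in the macros
 * above).
 */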
	.align 11
SYM_CODE_START(__kvm_hyp_host_vector)
	invalid_host_el2_vect			// Synchronous EL2t
	invalid_host_el2_vect			// IRQ EL2t
	invalid_host_el2_vect			// FIQ EL2t
	invalid_host_el2_vect			// Error EL2t

	invalid_host_el2_vect			// Synchronous EL2h
	invalid_host_el2_vect			// IRQ EL2h
	invalid_host_el2_vect			// FIQ EL2h
	invalid_host_el2_vect			// Error EL2h

	host_el1_sync_vect			// Synchronous 64-bit EL1
	invalid_host_el1_vect			// IRQ 64-bit EL1
	invalid_host_el1_vect			// FIQ 64-bit EL1
	invalid_host_el1_vect			// Error 64-bit EL1

	invalid_host_el1_vect			// Synchronous 32-bit EL1
	invalid_host_el1_vect			// IRQ 32-bit EL1
	invalid_host_el1_vect			// FIQ 32-bit EL1
	invalid_host_el1_vect			// Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_host_vector)

/*
 * Forward SMC with arguments in struct kvm_cpu_context, and
 * store the result into the same struct. Assumes SMCCC 1.2 or older.
 *
 * x0: struct kvm_cpu_context*
 */
SYM_CODE_START(__kvm_hyp_host_forward_smc)
	/*
	 * Use x18 to keep the pointer to the host context because
	 * x18 is callee-saved in SMCCC but not in AAPCS64.
	 */
	mov	x18, x0

	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	smc	#0
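	/* Per SMCCC 1.2, results may come back in x0-x17, so write the whole range to the context. */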

	stp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	stp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	stp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	ret
SYM_CODE_END(__kvm_hyp_host_forward_smc)
231*4882a593SmuzhiyunSYM_CODE_END(__kvm_hyp_host_forward_smc)
232