/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ptrauth.h>

	.text

/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu);
 */
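// Called with x0 pointing at the vcpu; returns an exit code in x0 (an
// ARM_EXCEPTION_* value, possibly with ARM_EXIT_WITH_SERROR_BIT ORed
// in by the exit path below).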
SYM_FUNC_START(__guest_enter)
	// x0: vcpu
	// x1-x17: clobbered by macros
	// x29: guest context

	adr_this_cpu x1, kvm_hyp_ctxt, x2

	// Store the hyp regs
	save_callee_saved_regs x1

	// Save hyp's sp_el0
	save_sp_el0	x1, x2
	// Now that the hyp state is stored, a pending RAS SError must
	// affect the host or hyp. If any asynchronous exception is pending,
	// we defer the guest entry. The DSB isn't necessary before v8.2 as
	// any SError would be fatal.
alternative_if ARM64_HAS_RAS_EXTN
	dsb	nshst
	isb
alternative_else_nop_endif
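	// ISR_EL1 reports pending physical interrupts: bit 8 is SError (A),
	// bit 7 is IRQ (I) and bit 6 is FIQ (F). If any is set, abort the
	// entry and report an interrupt exit so the host can handle it.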
	mrs	x1, isr_el1
	cbz	x1, 1f
	mov	x0, #ARM_EXCEPTION_IRQ
	ret

1:
	set_loaded_vcpu x0, x1, x2

	add	x29, x0, #VCPU_CONTEXT

	// Macro ptrauth_switch_to_guest format:
	//	ptrauth_switch_to_guest(guest ctxt, tmp1, tmp2, tmp3)
	// The macro restoring the guest keys is not implemented in C code,
	// as doing so may cause Pointer Authentication key signing mismatch
	// errors when this feature is enabled for kernel code.
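	// (A sketch of its behaviour, per asm/kvm_ptrauth.h: if address auth
	// is implemented and HCR_EL2.{API,APK} are set for this vCPU, the
	// guest's APIA/APIB/APDA/APDB/APGA key registers are reloaded from
	// the guest context; otherwise the macro is a NOP.)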
	ptrauth_switch_to_guest x29, x0, x1, x2

	// Restore the guest's sp_el0
	restore_sp_el0 x29, x0

	// Restore guest regs x0-x17
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	// Restore guest regs x18-x29, lr
	restore_callee_saved_regs x29

	// Do not touch any register after this!
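	// (The sb below is a speculation barrier: it keeps the CPU from
	// speculatively executing the exit path while the eret is still in
	// flight.)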
	eret
	sb

SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

	// If the hyp context is loaded, go straight to hyp_panic
	get_loaded_vcpu x0, x1
	cbnz	x0, 1f
	b	hyp_panic

1:
	// The hyp context is saved so make sure it is restored to allow
	// hyp_panic to run at hyp and, subsequently, panic to run in the host.
	// This makes use of __guest_exit to avoid duplication but sets the
	// return address to tail call into hyp_panic. As a side effect, the
	// current state is saved to the guest context but it will only be
	// accurate if the guest had been completely restored.
	adr_this_cpu x0, kvm_hyp_ctxt, x1
	adr_l	x1, hyp_panic
	str	x1, [x0, #CPU_XREG_OFFSET(30)]
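	// With hyp_panic written into the saved lr (x30) slot of the hyp
	// context, restore_callee_saved_regs in __guest_exit below reloads
	// it into lr, so the final ret tail calls into hyp_panic.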

	get_vcpu_ptr	x1, x0

SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
	// x0: return code
	// x1: vcpu
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

	add	x1, x1, #VCPU_CONTEXT

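	// Run the rest of the exit path with PAN set when the CPU has it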
	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)

	// Store the guest regs x2 and x3
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(2)]

	// Retrieve the guest regs x0-x1 from the stack
	ldp	x2, x3, [sp], #16	// x0, x1

	// Store the guest regs x0-x1 and x4-x17
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x1, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x1, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x1, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x1, #CPU_XREG_OFFSET(16)]

	// Store the guest regs x18-x29, lr
	save_callee_saved_regs x1

	// Store the guest's sp_el0
	save_sp_el0	x1, x2

	adr_this_cpu x2, kvm_hyp_ctxt, x3

	// Macro ptrauth_switch_to_hyp format:
	//	ptrauth_switch_to_hyp(guest ctxt, host ctxt, tmp1, tmp2, tmp3)
	// The macro saving/restoring the keys is not implemented in C code,
	// as doing so may cause Pointer Authentication key signing mismatch
	// errors when this feature is enabled for kernel code.
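	// (As above, a sketch: when ptrauth is in use for this vCPU, the
	// macro saves the guest's keys into the guest context, reloads hyp's
	// keys from the hyp context and issues an isb.)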
	ptrauth_switch_to_hyp x1, x2, x3, x4, x5

	// Restore hyp's sp_el0
	restore_sp_el0 x2, x3

	// Now restore the hyp regs
	restore_callee_saved_regs x2

	set_loaded_vcpu xzr, x2, x3
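	// Clearing the per-CPU loaded-vCPU pointer marks the hyp context as
	// loaded again, so a later __guest_exit_panic goes straight to
	// hyp_panic.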

alternative_if ARM64_HAS_RAS_EXTN
	// If we have the RAS extensions we can consume a pending error
	// without an unmask-SError and isb. The ESB instruction consumed any
	// pending guest error when we took the exception from the guest.
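	// DISR_EL1 holds the syndrome of the SError deferred by that ESB;
	// a nonzero value means the guest had an SError pending. Record it
	// in the vcpu's fault info and flag it in the exit code.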
	mrs_s	x2, SYS_DISR_EL1
	str	x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
	cbz	x2, 1f
	msr_s	SYS_DISR_EL1, xzr
	orr	x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
1:	ret
alternative_else
	dsb	sy		// Synchronize against in-flight ld/st
	isb			// Prevent an early read of side-effect free ISR
	mrs	x2, isr_el1
	tbnz	x2, #8, 2f	// ISR_EL1.A
	ret
	nop
2:
alternative_endif
	// We know we have a pending asynchronous abort; now is the
	// time to flush it out. From your VAXorcist book, page 666:
	// "Threaten me not, oh Evil one!  For I speak with
	// the power of DEC, and I command thee to show thyself!"
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, spsr_el2
	mov	x5, x0
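	// Taking the SError clobbers ELR_EL2, ESR_EL2 and SPSR_EL2, so the
	// guest's exception context (and the exit code, in x5) is stashed
	// above and restored once the abort has been delivered.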

	msr	daifclr, #4	// Unmask aborts

	// This is our single instruction exception window. A pending
	// SError is guaranteed to occur at the earliest when we unmask
	// it, and at the latest just after the ISB.
abort_guest_exit_start:

	isb

abort_guest_exit_end:

	msr	daifset, #4	// Mask aborts
	ret

	_kvm_extable	abort_guest_exit_start, 9997f
	_kvm_extable	abort_guest_exit_end, 9997f
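	// An SError taken inside the window is routed, via the extable
	// entries above, to the 9997 fixup below.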
9997:
	msr	daifset, #4	// Mask aborts
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)

	// Restore the EL2 exception context so that we can report some
	// information. Merge the exception code with the SError pending bit.
	msr	elr_el2, x2
	msr	esr_el2, x3
	msr	spsr_el2, x4
	orr	x0, x0, x5
1:	ret
SYM_FUNC_END(__guest_enter)