xref: /OK3568_Linux_fs/kernel/arch/arm64/kvm/hyp/nvhe/hyp-init.S (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun/* SPDX-License-Identifier: GPL-2.0-only */
2*4882a593Smuzhiyun/*
3*4882a593Smuzhiyun * Copyright (C) 2012,2013 - ARM Ltd
4*4882a593Smuzhiyun * Author: Marc Zyngier <marc.zyngier@arm.com>
5*4882a593Smuzhiyun */
6*4882a593Smuzhiyun
7*4882a593Smuzhiyun#include <linux/arm-smccc.h>
8*4882a593Smuzhiyun#include <linux/linkage.h>
9*4882a593Smuzhiyun
10*4882a593Smuzhiyun#include <asm/alternative.h>
11*4882a593Smuzhiyun#include <asm/assembler.h>
12*4882a593Smuzhiyun#include <asm/el2_setup.h>
13*4882a593Smuzhiyun#include <asm/kvm_arm.h>
14*4882a593Smuzhiyun#include <asm/kvm_asm.h>
15*4882a593Smuzhiyun#include <asm/kvm_mmu.h>
16*4882a593Smuzhiyun#include <asm/pgtable-hwdef.h>
17*4882a593Smuzhiyun#include <asm/sysreg.h>
18*4882a593Smuzhiyun#include <asm/virt.h>
19*4882a593Smuzhiyun
20*4882a593Smuzhiyun	.text
21*4882a593Smuzhiyun	.pushsection	.idmap.text, "ax"
22*4882a593Smuzhiyun
23*4882a593Smuzhiyun	.align	11
24*4882a593Smuzhiyun
/*
 * Temporary EL2 vector table used while the host installs KVM.
 * The host reaches __do_hyp_init via an HVC issued from 64-bit EL1;
 * every other vector is unexpected and parks the CPU in __invalid.
 */
25*4882a593SmuzhiyunSYM_CODE_START(__kvm_hyp_init)
26*4882a593Smuzhiyun	ventry	__invalid		// Synchronous EL2t
27*4882a593Smuzhiyun	ventry	__invalid		// IRQ EL2t
28*4882a593Smuzhiyun	ventry	__invalid		// FIQ EL2t
29*4882a593Smuzhiyun	ventry	__invalid		// Error EL2t
30*4882a593Smuzhiyun
31*4882a593Smuzhiyun	ventry	__invalid		// Synchronous EL2h
32*4882a593Smuzhiyun	ventry	__invalid		// IRQ EL2h
33*4882a593Smuzhiyun	ventry	__invalid		// FIQ EL2h
34*4882a593Smuzhiyun	ventry	__invalid		// Error EL2h
35*4882a593Smuzhiyun
36*4882a593Smuzhiyun	ventry	__do_hyp_init		// Synchronous 64-bit EL1
37*4882a593Smuzhiyun	ventry	__invalid		// IRQ 64-bit EL1
38*4882a593Smuzhiyun	ventry	__invalid		// FIQ 64-bit EL1
39*4882a593Smuzhiyun	ventry	__invalid		// Error 64-bit EL1
40*4882a593Smuzhiyun
41*4882a593Smuzhiyun	ventry	__invalid		// Synchronous 32-bit EL1
42*4882a593Smuzhiyun	ventry	__invalid		// IRQ 32-bit EL1
43*4882a593Smuzhiyun	ventry	__invalid		// FIQ 32-bit EL1
44*4882a593Smuzhiyun	ventry	__invalid		// Error 32-bit EL1
45*4882a593Smuzhiyun
46*4882a593Smuzhiyun__invalid:
47*4882a593Smuzhiyun	b	.			// Unexpected exception: spin forever
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun	/*
50*4882a593Smuzhiyun	 * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers.
51*4882a593Smuzhiyun	 *
52*4882a593Smuzhiyun	 * x0: SMCCC function ID
53*4882a593Smuzhiyun	 * x1: struct kvm_nvhe_init_params PA
54*4882a593Smuzhiyun	 */
55*4882a593Smuzhiyun__do_hyp_init:
56*4882a593Smuzhiyun	/* Check for a stub HVC call */
57*4882a593Smuzhiyun	cmp	x0, #HVC_STUB_HCALL_NR
58*4882a593Smuzhiyun	b.lo	__kvm_handle_stub_hvc	// Stub function IDs live below HVC_STUB_HCALL_NR
59*4882a593Smuzhiyun
60*4882a593Smuzhiyun	mov	x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
61*4882a593Smuzhiyun	cmp	x0, x3
62*4882a593Smuzhiyun	b.eq	1f
63*4882a593Smuzhiyun
	/* Any other function ID is rejected before the hypervisor is up */
64*4882a593Smuzhiyun	mov	x0, #SMCCC_RET_NOT_SUPPORTED
65*4882a593Smuzhiyun	eret
66*4882a593Smuzhiyun
	/* Init request: x1 = params PA; stash lr in x3 across the call */
67*4882a593Smuzhiyun1:	mov	x0, x1
68*4882a593Smuzhiyun	mov	x3, lr
69*4882a593Smuzhiyun	bl	___kvm_hyp_init			// Clobbers x0..x2
70*4882a593Smuzhiyun	mov	lr, x3
71*4882a593Smuzhiyun
72*4882a593Smuzhiyun	/* Hello, World! */
73*4882a593Smuzhiyun	mov	x0, #SMCCC_RET_SUCCESS
74*4882a593Smuzhiyun	eret
75*4882a593SmuzhiyunSYM_CODE_END(__kvm_hyp_init)
76*4882a593Smuzhiyun
77*4882a593Smuzhiyun/*
78*4882a593Smuzhiyun * Initialize the hypervisor in EL2.
79*4882a593Smuzhiyun *
80*4882a593Smuzhiyun * Only uses x0..x2 so as to not clobber callee-saved SMCCC registers
81*4882a593Smuzhiyun * and leave x3 for the caller.
82*4882a593Smuzhiyun *
83*4882a593Smuzhiyun * x0: struct kvm_nvhe_init_params PA
84*4882a593Smuzhiyun */
85*4882a593SmuzhiyunSYM_CODE_START_LOCAL(___kvm_hyp_init)
	/* Install the host-prepared EL2 register values from the params struct */
86*4882a593Smuzhiyun	ldr	x1, [x0, #NVHE_INIT_TPIDR_EL2]
87*4882a593Smuzhiyun	msr	tpidr_el2, x1
88*4882a593Smuzhiyun
89*4882a593Smuzhiyun	ldr	x1, [x0, #NVHE_INIT_STACK_HYP_VA]
90*4882a593Smuzhiyun	mov	sp, x1			// hyp stack (VA valid once the MMU is on)
91*4882a593Smuzhiyun
92*4882a593Smuzhiyun	ldr	x1, [x0, #NVHE_INIT_MAIR_EL2]
93*4882a593Smuzhiyun	msr	mair_el2, x1
94*4882a593Smuzhiyun
95*4882a593Smuzhiyun	ldr	x1, [x0, #NVHE_INIT_HCR_EL2]
96*4882a593Smuzhiyun	msr	hcr_el2, x1
97*4882a593Smuzhiyun
98*4882a593Smuzhiyun	ldr	x1, [x0, #NVHE_INIT_VTTBR]
99*4882a593Smuzhiyun	msr	vttbr_el2, x1
100*4882a593Smuzhiyun
101*4882a593Smuzhiyun	ldr	x1, [x0, #NVHE_INIT_VTCR]
102*4882a593Smuzhiyun	msr	vtcr_el2, x1
103*4882a593Smuzhiyun
	/* Point TTBR0_EL2 at the hyp page tables, tagging with CNP if supported */
104*4882a593Smuzhiyun	ldr	x1, [x0, #NVHE_INIT_PGD_PA]
105*4882a593Smuzhiyun	phys_to_ttbr x2, x1
106*4882a593Smuzhiyunalternative_if ARM64_HAS_CNP
107*4882a593Smuzhiyun	orr	x2, x2, #TTBR_CNP_BIT
108*4882a593Smuzhiyunalternative_else_nop_endif
109*4882a593Smuzhiyun	msr	ttbr0_el2, x2
110*4882a593Smuzhiyun
111*4882a593Smuzhiyun	/*
112*4882a593Smuzhiyun	 * Set the PS bits in TCR_EL2.
113*4882a593Smuzhiyun	 */
114*4882a593Smuzhiyun	ldr	x0, [x0, #NVHE_INIT_TCR_EL2]	// x0 now dead as params pointer
115*4882a593Smuzhiyun	tcr_compute_pa_size x0, #TCR_EL2_PS_SHIFT, x1, x2
116*4882a593Smuzhiyun	msr	tcr_el2, x0
117*4882a593Smuzhiyun
118*4882a593Smuzhiyun	isb
119*4882a593Smuzhiyun
120*4882a593Smuzhiyun	/* Invalidate the stale TLBs from Bootloader */
121*4882a593Smuzhiyun	tlbi	alle2
122*4882a593Smuzhiyun	tlbi	vmalls12e1
123*4882a593Smuzhiyun	dsb	sy			// Complete the invalidation before enabling the MMU
124*4882a593Smuzhiyun
	/* Turn the EL2 MMU on, enabling pointer authentication when available */
125*4882a593Smuzhiyun	mov_q	x0, INIT_SCTLR_EL2_MMU_ON
126*4882a593Smuzhiyunalternative_if ARM64_HAS_ADDRESS_AUTH
127*4882a593Smuzhiyun	mov_q	x1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
128*4882a593Smuzhiyun		     SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
129*4882a593Smuzhiyun	orr	x0, x0, x1
130*4882a593Smuzhiyunalternative_else_nop_endif
131*4882a593Smuzhiyun	msr	sctlr_el2, x0
132*4882a593Smuzhiyun	isb
133*4882a593Smuzhiyun
134*4882a593Smuzhiyun	/* Set the host vector */
135*4882a593Smuzhiyun	ldr	x0, =__kvm_hyp_host_vector
136*4882a593Smuzhiyun	msr	vbar_el2, x0
137*4882a593Smuzhiyun
138*4882a593Smuzhiyun	ret
139*4882a593SmuzhiyunSYM_CODE_END(___kvm_hyp_init)
140*4882a593Smuzhiyun
141*4882a593Smuzhiyun/*
142*4882a593Smuzhiyun * PSCI CPU_ON entry point
143*4882a593Smuzhiyun *
144*4882a593Smuzhiyun * x0: struct kvm_nvhe_init_params PA
145*4882a593Smuzhiyun */
146*4882a593SmuzhiyunSYM_CODE_START(kvm_hyp_cpu_entry)
	/* Tail-call the common init path with is_cpu_on = true in x1 */
147*4882a593Smuzhiyun	mov	x1, #1				// is_cpu_on = true
148*4882a593Smuzhiyun	b	__kvm_hyp_init_cpu
149*4882a593SmuzhiyunSYM_CODE_END(kvm_hyp_cpu_entry)
150*4882a593Smuzhiyun
151*4882a593Smuzhiyun/*
152*4882a593Smuzhiyun * PSCI CPU_SUSPEND / SYSTEM_SUSPEND entry point
153*4882a593Smuzhiyun *
154*4882a593Smuzhiyun * x0: struct kvm_nvhe_init_params PA
155*4882a593Smuzhiyun */
156*4882a593SmuzhiyunSYM_CODE_START(kvm_hyp_cpu_resume)
	/* Tail-call the common init path with is_cpu_on = false in x1 */
157*4882a593Smuzhiyun	mov	x1, #0				// is_cpu_on = false
158*4882a593Smuzhiyun	b	__kvm_hyp_init_cpu
159*4882a593SmuzhiyunSYM_CODE_END(kvm_hyp_cpu_resume)
160*4882a593Smuzhiyun
161*4882a593Smuzhiyun/*
162*4882a593Smuzhiyun * Common code for CPU entry points. Initializes EL2 state and
163*4882a593Smuzhiyun * installs the hypervisor before handing over to a C handler.
164*4882a593Smuzhiyun *
165*4882a593Smuzhiyun * x0: struct kvm_nvhe_init_params PA
166*4882a593Smuzhiyun * x1: bool is_cpu_on
167*4882a593Smuzhiyun */
168*4882a593SmuzhiyunSYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
	/* x28/x29 survive the init macros below, which clobber x0..x2 */
169*4882a593Smuzhiyun	mov	x28, x0				// Stash arguments
170*4882a593Smuzhiyun	mov	x29, x1
171*4882a593Smuzhiyun
172*4882a593Smuzhiyun	/* Check that the core was booted in EL2. */
173*4882a593Smuzhiyun	mrs	x0, CurrentEL
174*4882a593Smuzhiyun	cmp	x0, #CurrentEL_EL2
175*4882a593Smuzhiyun	b.eq	2f
176*4882a593Smuzhiyun
177*4882a593Smuzhiyun	/* The core booted in EL1. KVM cannot be initialized on it. */
178*4882a593Smuzhiyun1:	wfe
179*4882a593Smuzhiyun	wfi
180*4882a593Smuzhiyun	b	1b				// Park the CPU forever
181*4882a593Smuzhiyun
182*4882a593Smuzhiyun2:	msr	SPsel, #1			// We want to use SP_EL{1,2}
183*4882a593Smuzhiyun
184*4882a593Smuzhiyun	/* Initialize EL2 CPU state to sane values. */
185*4882a593Smuzhiyun	init_el2_state				// Clobbers x0..x2
186*4882a593Smuzhiyun
187*4882a593Smuzhiyun	/* Enable MMU, set vectors and stack. */
188*4882a593Smuzhiyun	mov	x0, x28				// params PA stashed above
189*4882a593Smuzhiyun	bl	___kvm_hyp_init			// Clobbers x0..x2
190*4882a593Smuzhiyun
	/*
	 * Jump out of the idmap into the C handler; x1 holds the linked
	 * (non-idmap) address of kvm_host_psci_cpu_entry, x0 = is_cpu_on.
	 */
191*4882a593Smuzhiyun	/* Leave idmap. */
192*4882a593Smuzhiyun	mov	x0, x29
193*4882a593Smuzhiyun	ldr	x1, =kvm_host_psci_cpu_entry
194*4882a593Smuzhiyun	br	x1
195*4882a593SmuzhiyunSYM_CODE_END(__kvm_hyp_init_cpu)
196*4882a593Smuzhiyun
/*
 * Handle a stub hypercall (function ID in x0, below HVC_STUB_HCALL_NR):
 *   HVC_SOFT_RESTART:  x1 = new EL2 entry point, x2..x4 = its arguments
 *   HVC_RESET_VECTORS: tear KVM down and reinstall the hyp stub vectors
 * Anything else returns HVC_STUB_ERR in x0.
 */
197*4882a593SmuzhiyunSYM_CODE_START(__kvm_handle_stub_hvc)
198*4882a593Smuzhiyun	cmp	x0, #HVC_SOFT_RESTART
199*4882a593Smuzhiyun	b.ne	1f
200*4882a593Smuzhiyun
201*4882a593Smuzhiyun	/* This is where we're about to jump, staying at EL2 */
202*4882a593Smuzhiyun	msr	elr_el2, x1
203*4882a593Smuzhiyun	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
204*4882a593Smuzhiyun	msr	spsr_el2, x0		// eret to EL2h with all exceptions masked
205*4882a593Smuzhiyun
206*4882a593Smuzhiyun	/* Shuffle the arguments, and don't come back */
207*4882a593Smuzhiyun	mov	x0, x2
208*4882a593Smuzhiyun	mov	x1, x3
209*4882a593Smuzhiyun	mov	x2, x4
210*4882a593Smuzhiyun	b	reset
211*4882a593Smuzhiyun
212*4882a593Smuzhiyun1:	cmp	x0, #HVC_RESET_VECTORS
213*4882a593Smuzhiyun	b.ne	1f			// Neither known stub call: bad stub error below
214*4882a593Smuzhiyun
215*4882a593Smuzhiyun	/*
216*4882a593Smuzhiyun	 * Set the HVC_RESET_VECTORS return code before entering the common
217*4882a593Smuzhiyun	 * path so that we do not clobber x0-x2 in case we are coming via
218*4882a593Smuzhiyun	 * HVC_SOFT_RESTART.
219*4882a593Smuzhiyun	 */
220*4882a593Smuzhiyun	mov	x0, xzr
221*4882a593Smuzhiyunreset:
222*4882a593Smuzhiyun	/* Reset kvm back to the hyp stub. */
223*4882a593Smuzhiyun	mov_q	x5, INIT_SCTLR_EL2_MMU_OFF
224*4882a593Smuzhiyun	pre_disable_mmu_workaround
225*4882a593Smuzhiyun	msr	sctlr_el2, x5		// MMU off at EL2
226*4882a593Smuzhiyun	isb
227*4882a593Smuzhiyun
	/* Protected mode runs with a restricted HCR_EL2; restore the host flags */
228*4882a593Smuzhiyunalternative_if ARM64_KVM_PROTECTED_MODE
229*4882a593Smuzhiyun	mov_q	x5, HCR_HOST_NVHE_FLAGS
230*4882a593Smuzhiyun	msr	hcr_el2, x5
231*4882a593Smuzhiyunalternative_else_nop_endif
232*4882a593Smuzhiyun
233*4882a593Smuzhiyun	/* Install stub vectors */
234*4882a593Smuzhiyun	adr_l	x5, __hyp_stub_vectors
235*4882a593Smuzhiyun	msr	vbar_el2, x5
236*4882a593Smuzhiyun	eret
237*4882a593Smuzhiyun
238*4882a593Smuzhiyun1:	/* Bad stub call */
239*4882a593Smuzhiyun	mov_q	x0, HVC_STUB_ERR
240*4882a593Smuzhiyun	eret
241*4882a593Smuzhiyun
242*4882a593SmuzhiyunSYM_CODE_END(__kvm_handle_stub_hvc)
243*4882a593Smuzhiyun
/*
 * Switch EL2 onto a new set of page tables and a new stack.
 *
 * x0: struct kvm_nvhe_init_params PA (provides PGD PA and stack VA)
 * x1: address to branch to once the switch is complete
 * Clobbers x2..x4; runs from the idmap with the MMU briefly disabled.
 */
244*4882a593SmuzhiyunSYM_FUNC_START(__pkvm_init_switch_pgd)
245*4882a593Smuzhiyun	/* Turn the MMU off */
246*4882a593Smuzhiyun	pre_disable_mmu_workaround
247*4882a593Smuzhiyun	mrs	x2, sctlr_el2		// Save SCTLR_EL2 to restore later
248*4882a593Smuzhiyun	bic	x3, x2, #SCTLR_ELx_M
249*4882a593Smuzhiyun	msr	sctlr_el2, x3
250*4882a593Smuzhiyun	isb
251*4882a593Smuzhiyun
252*4882a593Smuzhiyun	tlbi	alle2			// Drop translations for the old tables
253*4882a593Smuzhiyun
254*4882a593Smuzhiyun	/* Install the new pgtables */
255*4882a593Smuzhiyun	ldr	x3, [x0, #NVHE_INIT_PGD_PA]
256*4882a593Smuzhiyun	phys_to_ttbr x4, x3
257*4882a593Smuzhiyunalternative_if ARM64_HAS_CNP
258*4882a593Smuzhiyun	orr	x4, x4, #TTBR_CNP_BIT
259*4882a593Smuzhiyunalternative_else_nop_endif
260*4882a593Smuzhiyun	msr	ttbr0_el2, x4
261*4882a593Smuzhiyun
262*4882a593Smuzhiyun	/* Set the new stack pointer */
263*4882a593Smuzhiyun	ldr	x0, [x0, #NVHE_INIT_STACK_HYP_VA]
264*4882a593Smuzhiyun	mov	sp, x0
265*4882a593Smuzhiyun
266*4882a593Smuzhiyun	/* And turn the MMU back on! */
267*4882a593Smuzhiyun	set_sctlr_el2	x2		// Restore the SCTLR_EL2 saved above
268*4882a593Smuzhiyun	ret	x1			// Continue at the caller-supplied address
269*4882a593SmuzhiyunSYM_FUNC_END(__pkvm_init_switch_pgd)
270*4882a593Smuzhiyun
271*4882a593Smuzhiyun	.popsection
272