/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_INIT_H__
#define __ARM_KVM_INIT_H__

#ifndef __ASSEMBLY__
#error Assembly-only header
#endif

#include <asm/kvm_arm.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <linux/irqchip/arm-gic-v3.h>

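/* Set SCTLR_EL2 to a known MMU-off state (RES1 bits set, MMU and caches disabled) */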
.macro __init_el2_sctlr
	mov_q	x0, INIT_SCTLR_EL2_MMU_OFF
	msr	sctlr_el2, x0
	isb
.endm

/*
 * Allow Non-secure EL1 and EL0 to access the physical timer and counter.
 * This is not necessary for VHE, since the host kernel runs in EL2,
 * and EL0 accesses are configured at a later stage of the boot process.
 * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
 * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined
 * to access CNTHCTL_EL2. This allows a kernel designed to run at EL1
 * to transparently mess with the EL0 bits via CNTKCTL_EL1 accesses
 * from EL2.
 */
.macro __init_el2_timers
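	/* CNTHCTL_EL2 bits 1:0 are EL1PCEN:EL1PCTEN when HCR_EL2.E2H == 0 */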
	mrs	x0, cnthctl_el2
	orr	x0, x0, #3			// Enable EL1 physical timers
	msr	cnthctl_el2, x0
	msr	cntvoff_el2, xzr		// Clear virtual offset
.endm

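/*
 * Build a default MDCR_EL2 value in x2: expose all PMU counters to EL1
 * (MDCR_EL2.HPMN = PMCR_EL0.N) and, since everything is set up for
 * non-VHE, let the EL1&0 translation regime own the SPE and trace
 * buffers where possible.
 */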
.macro __init_el2_debug
	mrs	x1, id_aa64dfr0_el1
	sbfx	x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
	cmp	x0, #1
	b.lt	.Lskip_pmu_\@			// Skip if no PMU present
	mrs	x0, pmcr_el0			// Disable debug access traps
	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
.Lskip_pmu_\@:
	csel	x2, xzr, x0, lt			// all PMU counters from EL1

	/* Statistical profiling */
	ubfx	x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
	cbz	x0, .Lskip_spe_\@		// Skip if SPE not present

	mrs_s	x0, SYS_PMBIDR_EL1              // If SPE available at EL2,
	and	x0, x0, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
	cbnz	x0, .Lskip_spe_el2_\@		// then permit sampling of physical
	mov	x0, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
		      1 << SYS_PMSCR_EL2_PA_SHIFT)
	msr_s	SYS_PMSCR_EL2, x0		// addresses and physical counter
.Lskip_spe_el2_\@:
	mov	x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
	orr	x2, x2, x0			// If we don't have VHE, then
						// use EL1&0 translation.

.Lskip_spe_\@:
	/* Trace buffer */
	ubfx	x0, x1, #ID_AA64DFR0_TRBE_SHIFT, #4
	cbz	x0, .Lskip_trace_\@		// Skip if TraceBuffer is not present

	mrs_s	x0, SYS_TRBIDR_EL1
	and	x0, x0, TRBIDR_PROG
	cbnz	x0, .Lskip_trace_\@		// If TRBE is available at EL2

	mov	x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
	orr	x2, x2, x0			// allow the EL1&0 translation
						// to own it.

.Lskip_trace_\@:
	msr	mdcr_el2, x2			// Configure debug traps
.endm

/* LORegions */
.macro __init_el2_lor
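	/* Disable LORegions by clearing LORC_EL1.EN when FEAT_LOR is implemented */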
	mrs	x1, id_aa64mmfr1_el1
	ubfx	x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4
	cbz	x0, .Lskip_lor_\@
	msr_s	SYS_LORC_EL1, xzr
.Lskip_lor_\@:
.endm

/* Stage-2 translation */
.macro __init_el2_stage2
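	/* EL1&0 TLB entries are VMID-tagged even with stage-2 disabled, so use VMID 0 */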
	msr	vttbr_el2, xzr
.endm

/* GICv3 system register access */
.macro __init_el2_gicv3
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4
	cbz	x0, .Lskip_gicv3_\@

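	/* The SRE write below may not stick if sysreg access is unsupported or disabled at EL3 */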
	mrs_s	x0, SYS_ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
	msr_s	SYS_ICC_SRE_EL2, x0
	isb					// Make sure SRE is now set
	mrs_s	x0, SYS_ICC_SRE_EL2		// Read SRE back,
	tbz	x0, #0, .Lskip_gicv3_\@		// and check that it sticks
	msr_s	SYS_ICH_HCR_EL2, xzr		// Reset ICH_HCR_EL2 to defaults
.Lskip_gicv3_\@:
.endm

.macro __init_el2_hstr
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
.endm

/* Virtual CPU ID registers */
.macro __init_el2_nvhe_idregs
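	/*
	 * EL1 reads of MIDR_EL1/MPIDR_EL1 return VPIDR_EL2/VMPIDR_EL2,
	 * so mirror the real values into them.
	 */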
	mrs	x0, midr_el1
	mrs	x1, mpidr_el1
	msr	vpidr_el2, x0
	msr	vmpidr_el2, x1
.endm

/* Coprocessor traps */
.macro __init_el2_nvhe_cptr
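	/* 0x33ff = CPTR_EL2 RES1 bits plus TZ: SVE stays trapped until __init_el2_nvhe_sve */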
	mov	x0, #0x33ff
	msr	cptr_el2, x0			// Disable copro. traps to EL2
.endm

/* SVE register access */
.macro __init_el2_nvhe_sve
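	/* x0 is expected to still hold the CPTR_EL2 value set by __init_el2_nvhe_cptr */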
	mrs	x1, id_aa64pfr0_el1
	ubfx	x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4
	cbz	x1, .Lskip_sve_\@

	bic	x0, x0, #CPTR_EL2_TZ		// Also disable SVE traps
	msr	cptr_el2, x0			// Disable copro. traps to EL2
	isb
	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
	msr_s	SYS_ZCR_EL2, x1			// length for EL1.
.Lskip_sve_\@:
.endm

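/* Prepare SPSR_EL2 for an eret into EL1h with DAIF exceptions masked */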
.macro __init_el2_nvhe_prepare_eret
	mov	x0, #INIT_PSTATE_EL1
	msr	spsr_el2, x0
.endm

/**
 * Initialize EL2 registers to sane values. This should be called early on all
 * cores that were booted in EL2. Note that everything gets initialised as
 * if VHE was not available. The kernel context will be upgraded to VHE
 * if possible later on in the boot process.
 *
 * Regs: x0, x1 and x2 are clobbered.
 */
.macro init_el2_state
	__init_el2_sctlr
	__init_el2_timers
	__init_el2_debug
	__init_el2_lor
	__init_el2_stage2
	__init_el2_gicv3
	__init_el2_hstr
	__init_el2_nvhe_idregs
	__init_el2_nvhe_cptr
	__init_el2_nvhe_sve
	__init_el2_nvhe_prepare_eret
.endm
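
/*
 * Illustrative usage sketch (an assumption, not part of this header): early
 * boot code such as head.S is expected to check CurrentEL and, when entered
 * at EL2, run init_el2_state before dropping to EL1, along these lines:
 *
 *	mrs	x0, CurrentEL
 *	cmp	x0, #CurrentEL_EL2
 *	b.ne	1f			// already in EL1, nothing to do
 *	mov_q	x0, HCR_HOST_NVHE_FLAGS
 *	msr	hcr_el2, x0		// host HCR_EL2 must be set up first
 *	isb
 *	init_el2_state			// clobbers x0-x2
 *	adr	x0, 1f
 *	msr	elr_el2, x0
 *	eret				// drop to EL1 using the SPSR set above
 * 1:
 */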

#endif /* __ARM_KVM_INIT_H__ */