/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arm_arch_svc.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <context.h>
#include <cortex_a76.h>
#include <cpu_macros.S>
#include <plat_macros.S>

#if !DYNAMIC_WORKAROUND_CVE_2018_3639
#error Cortex A76 requires DYNAMIC_WORKAROUND_CVE_2018_3639=1
#endif

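/*
 * Expected ESR_EL3 values for an SMC #0 trapped from a lower EL: the EC
 * field is 0x17 (SMC from AArch64) or 0x13 (SMC from AArch32), the IL bit
 * is set and the remaining ISS bits are zero.
 */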
#define ESR_EL3_A64_SMC0	0x5e000000
#define ESR_EL3_A32_SMC0	0x4e000000

	/*
	 * This macro applies the mitigation for CVE-2018-3639.
	 * It implements a fast path where `SMCCC_ARCH_WORKAROUND_2`
	 * SMC calls from a lower EL running in AArch32 or AArch64
	 * will go through the fast path and return early.
	 *
	 * The macro saves x2-x3 to the context.  In the fast path
	 * x0-x3 registers do not need to be restored as the calling
	 * context will have saved them.
	 */
	.macro apply_cve_2018_3639_wa _is_sync_exception _esr_el3_val
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]

	.if \_is_sync_exception
		/*
		 * Ensure SMC is coming from A64/A32 state on #0
		 * with W0 = SMCCC_ARCH_WORKAROUND_2
		 *
		 * This sequence evaluates as:
		 *    (W0==SMCCC_ARCH_WORKAROUND_2) ? (ESR_EL3==SMC#0) : (NE)
		 * allowing use of a single branch operation
		 */
		orr	w2, wzr, #SMCCC_ARCH_WORKAROUND_2
		cmp	x0, x2
		mrs	x3, esr_el3
		mov_imm	w2, \_esr_el3_val
		ccmp	w2, w3, #0, eq
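		/*
		 * If the first compare did not match (W0 is not
		 * SMCCC_ARCH_WORKAROUND_2), the ccmp sets NZCV to #0 instead
		 * of comparing, leaving the Z flag clear so the branch below
		 * is taken.
		 */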
		/*
		 * Static predictor will predict a fall-through, optimizing
		 * the `SMCCC_ARCH_WORKAROUND_2` fast path.
		 */
		bne	1f

		/*
		 * The sequence below implements the `SMCCC_ARCH_WORKAROUND_2`
		 * fast path.
		 */
		cmp	x1, xzr /* enable/disable check */

		/*
		 * When the calling context wants mitigation disabled,
		 * we program the mitigation disable function in the
		 * CPU context, which gets invoked on subsequent exits from
		 * EL3 via the `el3_exit` function.  Otherwise NULL is
		 * programmed in the CPU context, which results in the caller
		 * inheriting the EL3 mitigation state (enabled) on subsequent
		 * `el3_exit`.
		 */
		mov	x0, xzr
		adr	x1, cortex_a76_disable_wa_cve_2018_3639
		csel	x1, x1, x0, eq
		str	x1, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]

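		/*
		 * Apply the requested state to the hardware as well: x1 holds
		 * CPUACTLR2 with the load-pass-store disable bit set
		 * (mitigation on) and x3 holds it cleared (mitigation off).
		 * The EQ condition from the enable/disable check above selects
		 * the cleared value when the caller asked for the mitigation
		 * to be disabled.
		 */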
		mrs	x2, CORTEX_A76_CPUACTLR2_EL1
		orr	x1, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
		bic	x3, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
		csel	x3, x3, x1, eq
		msr	CORTEX_A76_CPUACTLR2_EL1, x3
		eret	/* ERET implies ISB */
	.endif
1:
	/*
	 * Always enable the CVE-2018-3639 (variant 4) mitigation during EL3
	 * execution.  This is not required for the fast path above because it
	 * does not perform any memory loads.
	 */
	mrs	x2, CORTEX_A76_CPUACTLR2_EL1
	orr	x2, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
	msr	CORTEX_A76_CPUACTLR2_EL1, x2
	isb

	/*
	 * The caller may have passed arguments to EL3 via x2-x3.
	 * Restore these registers from the context before jumping to the
	 * main runtime vector table entry.
	 */
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	.endm

vector_base cortex_a76_wa_cve_2018_3639_a76_vbar
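
	/*
	 * These vectors replace the generic BL31 runtime vectors on
	 * Cortex-A76 (installed by cortex_a76_reset_func below).  Entries
	 * for exceptions from lower ELs run the CVE-2018-3639 mitigation
	 * macro first and then branch to the corresponding generic handler;
	 * exceptions taken from EL3 itself branch straight through.
	 */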

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry cortex_a76_sync_exception_sp_el0
	b	sync_exception_sp_el0
end_vector_entry cortex_a76_sync_exception_sp_el0

vector_entry cortex_a76_irq_sp_el0
	b	irq_sp_el0
end_vector_entry cortex_a76_irq_sp_el0

vector_entry cortex_a76_fiq_sp_el0
	b	fiq_sp_el0
end_vector_entry cortex_a76_fiq_sp_el0

vector_entry cortex_a76_serror_sp_el0
	b	serror_sp_el0
end_vector_entry cortex_a76_serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry cortex_a76_sync_exception_sp_elx
	b	sync_exception_sp_elx
end_vector_entry cortex_a76_sync_exception_sp_elx

vector_entry cortex_a76_irq_sp_elx
	b	irq_sp_elx
end_vector_entry cortex_a76_irq_sp_elx

vector_entry cortex_a76_fiq_sp_elx
	b	fiq_sp_elx
end_vector_entry cortex_a76_fiq_sp_elx

vector_entry cortex_a76_serror_sp_elx
	b	serror_sp_elx
end_vector_entry cortex_a76_serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry cortex_a76_sync_exception_aarch64
	apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A64_SMC0
	b	sync_exception_aarch64
end_vector_entry cortex_a76_sync_exception_aarch64

vector_entry cortex_a76_irq_aarch64
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
	b	irq_aarch64
end_vector_entry cortex_a76_irq_aarch64

vector_entry cortex_a76_fiq_aarch64
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
	b	fiq_aarch64
end_vector_entry cortex_a76_fiq_aarch64

vector_entry cortex_a76_serror_aarch64
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
	b	serror_aarch64
end_vector_entry cortex_a76_serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry cortex_a76_sync_exception_aarch32
	apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A32_SMC0
	b	sync_exception_aarch32
end_vector_entry cortex_a76_sync_exception_aarch32

vector_entry cortex_a76_irq_aarch32
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
	b	irq_aarch32
end_vector_entry cortex_a76_irq_aarch32

vector_entry cortex_a76_fiq_aarch32
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
	b	fiq_aarch32
end_vector_entry cortex_a76_fiq_aarch32

vector_entry cortex_a76_serror_aarch32
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
	b	serror_aarch32
end_vector_entry cortex_a76_serror_aarch32

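	/* --------------------------------------------------
	 * Report whether the workaround for CVE-2018-3639 is
	 * compiled in (ERRATA_APPLIES / ERRATA_MISSING).
	 * --------------------------------------------------
	 */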
func check_errata_cve_2018_3639
#if WORKAROUND_CVE_2018_3639
	mov	x0, #ERRATA_APPLIES
#else
	mov	x0, #ERRATA_MISSING
#endif
	ret
endfunc check_errata_cve_2018_3639

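	/* --------------------------------------------------
	 * Disable the CVE-2018-3639 mitigation by clearing
	 * the load-pass-store disable bit in CPUACTLR2_EL1.
	 * This is the function whose address the fast path
	 * above stores in the CPU context for `el3_exit` to
	 * invoke.
	 * --------------------------------------------------
	 */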
func cortex_a76_disable_wa_cve_2018_3639
	mrs	x0, CORTEX_A76_CPUACTLR2_EL1
	bic	x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
	msr	CORTEX_A76_CPUACTLR2_EL1, x0
	isb
	ret
endfunc cortex_a76_disable_wa_cve_2018_3639

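	/* --------------------------------------------------
	 * Reset handler: enable the CVE-2018-3639 mitigation
	 * by default, install the CPU-specific EL3 vectors
	 * (BL31 only) and apply the DSU erratum 936184
	 * workaround when that build option is enabled.
	 * --------------------------------------------------
	 */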
func cortex_a76_reset_func
	mov	x19, x30
#if WORKAROUND_CVE_2018_3639
	mrs	x0, CORTEX_A76_CPUACTLR2_EL1
	orr	x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
	msr	CORTEX_A76_CPUACTLR2_EL1, x0
	isb
#endif

#if IMAGE_BL31 && WORKAROUND_CVE_2018_3639
	/*
	 * Override the generic BL31 exception vectors with the Cortex-A76
	 * vectors defined above.  This is required in order to apply the
	 * mitigation for CVE-2018-3639 on exception entry from lower ELs.
	 */
	adr	x0, cortex_a76_wa_cve_2018_3639_a76_vbar
	msr	vbar_el3, x0
	isb
#endif

#if ERRATA_DSU_936184
	bl	errata_dsu_936184_wa
#endif
	ret	x19
endfunc cortex_a76_reset_func

	/* ---------------------------------------------
	 * HW will do the cache maintenance while powering down
	 * ---------------------------------------------
	 */
func cortex_a76_core_pwr_dwn
	/* ---------------------------------------------
	 * Enable CPU power down bit in power control register
	 * ---------------------------------------------
	 */
	mrs	x0, CORTEX_A76_CPUPWRCTLR_EL1
	orr	x0, x0, #CORTEX_A76_CORE_PWRDN_EN_MASK
	msr	CORTEX_A76_CPUPWRCTLR_EL1, x0
	isb
	ret
endfunc cortex_a76_core_pwr_dwn

#if REPORT_ERRATA
/*
 * Errata printing function for Cortex-A76. Must follow AAPCS.
 */
func cortex_a76_errata_report
	stp	x8, x30, [sp, #-16]!

	bl	cpu_get_rev_var
	mov	x8, x0

	/*
	 * Report all errata. The revision-variant information is passed to
	 * the checking function of each erratum.
	 */
	report_errata WORKAROUND_CVE_2018_3639, cortex_a76, cve_2018_3639
	report_errata ERRATA_DSU_936184, cortex_a76, dsu_936184

	ldp	x8, x30, [sp], #16
	ret
endfunc cortex_a76_errata_report
#endif

	/* ---------------------------------------------
	 * This function provides cortex_a76 specific
	 * register information for crash reporting.
	 * It needs to return with x6 pointing to
	 * a list of register names in ascii and
	 * x8 - x15 having values of registers to be
	 * reported.
	 * ---------------------------------------------
	 */
.section .rodata.cortex_a76_regs, "aS"
cortex_a76_regs:  /* The ascii list of register names to be reported */
	.asciz	"cpuectlr_el1", ""

func cortex_a76_cpu_reg_dump
	adr	x6, cortex_a76_regs
	mrs	x8, CORTEX_A76_CPUECTLR_EL1
	ret
endfunc cortex_a76_cpu_reg_dump

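/*
 * Register the Cortex-A76 CPU operations.  The _wa variant of the macro
 * carries the extra workaround hooks: cortex_a76_disable_wa_cve_2018_3639
 * backs the dynamic CVE-2018-3639 mitigation implemented above, while
 * CPU_NO_EXTRA1_FUNC leaves the other workaround slot unused.
 */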
declare_cpu_ops_wa cortex_a76, CORTEX_A76_MIDR, \
	cortex_a76_reset_func, \
	CPU_NO_EXTRA1_FUNC, \
	cortex_a76_disable_wa_cve_2018_3639, \
	cortex_a76_core_pwr_dwn
