/*
 * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <common/bl_common.h>
#include <context.h>
#include <cortex_a76.h>
#include <cpu_macros.S>
#include <plat_macros.S>
#include <services/arm_arch_svc.h>

#if !DYNAMIC_WORKAROUND_CVE_2018_3639
#error Cortex A76 requires DYNAMIC_WORKAROUND_CVE_2018_3639=1
#endif

#define ESR_EL3_A64_SMC0	0x5e000000
#define ESR_EL3_A32_SMC0	0x4e000000

	/*
	 * This macro applies the mitigation for CVE-2018-3639.
	 * It implements a fast path where `SMCCC_ARCH_WORKAROUND_2`
	 * SMC calls from a lower EL running in AArch32 or AArch64
	 * will take the fast path and return early.
	 *
	 * The macro saves x2-x3 to the context.  In the fast path
	 * x0-x3 registers do not need to be restored as the calling
	 * context will have saved them.
	 */
	.macro apply_cve_2018_3639_wa _is_sync_exception _esr_el3_val
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]

	.if \_is_sync_exception
		/*
		 * Ensure the SMC originates from A64/A32 state via SMC #0
		 * with W0 = SMCCC_ARCH_WORKAROUND_2.
		 *
		 * This sequence evaluates as:
		 *    (W0==SMCCC_ARCH_WORKAROUND_2) ? (ESR_EL3==SMC#0) : (NE)
		 * allowing use of a single branch operation
		 */
		orr	w2, wzr, #SMCCC_ARCH_WORKAROUND_2
		cmp	x0, x2
		mrs	x3, esr_el3
		mov_imm	w2, \_esr_el3_val
		ccmp	w2, w3, #0, eq
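		/*
		 * ccmp semantics: if the previous cmp set EQ, the flags now
		 * reflect (w2 - w3); otherwise NZCV is set to #0, which
		 * encodes NE.  A single bne therefore rejects both a wrong
		 * function ID and a wrong exception syndrome.
		 */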
		/*
		 * Static predictor will predict a fall-through, optimizing
		 * the `SMCCC_ARCH_WORKAROUND_2` fast path.
		 */
		bne	1f

		/*
		 * The sequence below implements the `SMCCC_ARCH_WORKAROUND_2`
		 * fast path.
		 */
		cmp	x1, xzr /* enable/disable check */
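		/*
		 * Per SMCCC, W1 carries the requested mitigation state:
		 * non-zero enables it and zero disables it.  EQ therefore
		 * means "disable requested" in the csel sequences below.
		 */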

		/*
		 * When the calling context wants mitigation disabled,
		 * we program the mitigation disable function in the
		 * CPU context, which gets invoked on subsequent exits from
		 * EL3 via the `el3_exit` function.  Otherwise NULL is
		 * programmed in the CPU context, which results in the
		 * caller inheriting the EL3 mitigation state (enabled) on
		 * subsequent `el3_exit`.
		 */
		mov	x0, xzr
		adr	x1, cortex_a76_disable_wa_cve_2018_3639
		csel	x1, x1, x0, eq
		str	x1, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]

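		/*
		 * Apply the requested state to CPUACTLR2_EL1 right away:
		 * clear DISABLE_LOAD_PASS_STORE (bic result) when disable
		 * was requested (EQ), otherwise keep it set (orr result).
		 */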
		mrs	x2, CORTEX_A76_CPUACTLR2_EL1
		orr	x1, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
		bic	x3, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
		csel	x3, x3, x1, eq
		msr	CORTEX_A76_CPUACTLR2_EL1, x3
		eret	/* ERET implies ISB */
	.endif
1:
	/*
	 * Always enable the CVE-2018-3639 (Spectre variant 4) mitigation
	 * during EL3 execution.  This is not required for the fast path
	 * above because it does not perform any memory loads.
	 */
	mrs	x2, CORTEX_A76_CPUACTLR2_EL1
	orr	x2, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
	msr	CORTEX_A76_CPUACTLR2_EL1, x2
	isb

	/*
	 * The caller may have passed arguments to EL3 via x2-x3.
	 * Restore these registers from the context before jumping to the
	 * main runtime vector table entry.
	 */
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	.endm

vector_base cortex_a76_wa_cve_2018_3639_a76_vbar
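	/*
	 * This table mirrors the generic runtime exception vectors: each
	 * entry applies the CVE-2018-3639 mitigation where required and
	 * then branches to the corresponding default handler.
	 */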

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry cortex_a76_sync_exception_sp_el0
	b	sync_exception_sp_el0
end_vector_entry cortex_a76_sync_exception_sp_el0

vector_entry cortex_a76_irq_sp_el0
	b	irq_sp_el0
end_vector_entry cortex_a76_irq_sp_el0

vector_entry cortex_a76_fiq_sp_el0
	b	fiq_sp_el0
end_vector_entry cortex_a76_fiq_sp_el0

vector_entry cortex_a76_serror_sp_el0
	b	serror_sp_el0
end_vector_entry cortex_a76_serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry cortex_a76_sync_exception_sp_elx
	b	sync_exception_sp_elx
end_vector_entry cortex_a76_sync_exception_sp_elx

vector_entry cortex_a76_irq_sp_elx
	b	irq_sp_elx
end_vector_entry cortex_a76_irq_sp_elx

vector_entry cortex_a76_fiq_sp_elx
	b	fiq_sp_elx
end_vector_entry cortex_a76_fiq_sp_elx

vector_entry cortex_a76_serror_sp_elx
	b	serror_sp_elx
end_vector_entry cortex_a76_serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry cortex_a76_sync_exception_aarch64
	apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A64_SMC0
	b	sync_exception_aarch64
end_vector_entry cortex_a76_sync_exception_aarch64

vector_entry cortex_a76_irq_aarch64
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
	b	irq_aarch64
end_vector_entry cortex_a76_irq_aarch64

vector_entry cortex_a76_fiq_aarch64
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
	b	fiq_aarch64
end_vector_entry cortex_a76_fiq_aarch64

vector_entry cortex_a76_serror_aarch64
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
	b	serror_aarch64
end_vector_entry cortex_a76_serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry cortex_a76_sync_exception_aarch32
	apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A32_SMC0
	b	sync_exception_aarch32
end_vector_entry cortex_a76_sync_exception_aarch32

vector_entry cortex_a76_irq_aarch32
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
	b	irq_aarch32
end_vector_entry cortex_a76_irq_aarch32

vector_entry cortex_a76_fiq_aarch32
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
	b	fiq_aarch32
end_vector_entry cortex_a76_fiq_aarch32

vector_entry cortex_a76_serror_aarch32
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
	b	serror_aarch32
end_vector_entry cortex_a76_serror_aarch32

	/* --------------------------------------------------
	 * Errata Workaround for Cortex A76 Errata #1130799.
	 * This applies only to revision <= r2p0 of Cortex A76.
	 * Inputs:
	 * x0: variant[4:7] and revision[0:3] of current cpu.
	 * Shall clobber: x0-x17
	 * --------------------------------------------------
	 */
func errata_a76_1130799_wa
	/*
	 * Compare x0 against revision r2p0
	 */
	mov	x17, x30
	bl	check_errata_1130799
	cbz	x0, 1f
	mrs	x1, CORTEX_A76_CPUACTLR2_EL1
	orr	x1, x1, #(1 << 59)
	msr	CORTEX_A76_CPUACTLR2_EL1, x1
	isb
1:
	ret	x17
endfunc errata_a76_1130799_wa

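/*
 * cpu_rev_var_ls returns ERRATA_APPLIES in x0 when the revision-variant
 * value passed in x0 is less than or equal to the value in x1, so 0x20
 * here selects revisions <= r2p0.
 */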
func check_errata_1130799
	mov	x1, #0x20
	b	cpu_rev_var_ls
endfunc check_errata_1130799

func check_errata_cve_2018_3639
#if WORKAROUND_CVE_2018_3639
	mov	x0, #ERRATA_APPLIES
#else
	mov	x0, #ERRATA_MISSING
#endif
	ret
endfunc check_errata_cve_2018_3639

func cortex_a76_disable_wa_cve_2018_3639
	mrs	x0, CORTEX_A76_CPUACTLR2_EL1
	bic	x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
	msr	CORTEX_A76_CPUACTLR2_EL1, x0
	isb
	ret
endfunc cortex_a76_disable_wa_cve_2018_3639

	/* -------------------------------------------------
	 * The CPU Ops reset function for Cortex-A76.
	 * Shall clobber: x0-x19
	 * -------------------------------------------------
	 */
func cortex_a76_reset_func
	mov	x19, x30
	bl	cpu_get_rev_var

#if ERRATA_A76_1130799
	bl	errata_a76_1130799_wa
#endif

#if WORKAROUND_CVE_2018_3639
	/* If the PE implements SSBS, we don't need the dynamic workaround */
	mrs	x0, id_aa64pfr1_el1
	lsr	x0, x0, #ID_AA64PFR1_EL1_SSBS_SHIFT
	and	x0, x0, #ID_AA64PFR1_EL1_SSBS_MASK
	cbnz	x0, 1f
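	/*
	 * With SSBS present, lower ELs control speculative store bypass
	 * through PSTATE.SSBS, so both the CPUACTLR2 setting and the
	 * vector table override below are skipped.
	 */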

	mrs	x0, CORTEX_A76_CPUACTLR2_EL1
	orr	x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
	msr	CORTEX_A76_CPUACTLR2_EL1, x0
	isb

#ifdef IMAGE_BL31
	/*
	 * The Cortex-A76 generic vectors are overwritten to use the vectors
	 * defined above.  This is required in order to apply mitigation
	 * against CVE-2018-3639 on exception entry from lower ELs.
	 */
	adr	x0, cortex_a76_wa_cve_2018_3639_a76_vbar
	msr	vbar_el3, x0
	isb
#endif

1:
#endif

#if ERRATA_DSU_936184
	bl	errata_dsu_936184_wa
#endif
	ret	x19
endfunc cortex_a76_reset_func

	/* ---------------------------------------------
	 * HW will do the cache maintenance while powering down
	 * ---------------------------------------------
	 */
func cortex_a76_core_pwr_dwn
	/* ---------------------------------------------
	 * Enable CPU power down bit in power control register
	 * ---------------------------------------------
	 */
	mrs	x0, CORTEX_A76_CPUPWRCTLR_EL1
	orr	x0, x0, #CORTEX_A76_CORE_PWRDN_EN_MASK
	msr	CORTEX_A76_CPUPWRCTLR_EL1, x0
	isb
	ret
endfunc cortex_a76_core_pwr_dwn

#if REPORT_ERRATA
/*
 * Errata printing function for Cortex-A76. Must follow AAPCS.
 */
func cortex_a76_errata_report
	stp	x8, x30, [sp, #-16]!

	bl	cpu_get_rev_var
	mov	x8, x0

	/*
	 * Report all errata. The revision-variant information is passed to
	 * the checking function of each erratum.
	 */
	report_errata ERRATA_A76_1130799, cortex_a76, 1130799
	report_errata WORKAROUND_CVE_2018_3639, cortex_a76, cve_2018_3639
	report_errata ERRATA_DSU_936184, cortex_a76, dsu_936184

	ldp	x8, x30, [sp], #16
	ret
endfunc cortex_a76_errata_report
#endif

	/* ---------------------------------------------
	 * This function provides Cortex-A76 specific
	 * register information for crash reporting.
	 * It needs to return with x6 pointing to
	 * a list of register names in ascii and
	 * x8 - x15 having values of registers to be
	 * reported.
	 * ---------------------------------------------
	 */
.section .rodata.cortex_a76_regs, "aS"
cortex_a76_regs:  /* The ascii list of register names to be reported */
	.asciz	"cpuectlr_el1", ""	/* Trailing empty string terminates the list */

func cortex_a76_cpu_reg_dump
	adr	x6, cortex_a76_regs
	mrs	x8, CORTEX_A76_CPUECTLR_EL1
	ret
endfunc cortex_a76_cpu_reg_dump

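/*
 * declare_cpu_ops_wa also registers the per-CPU workaround hooks: the
 * extra1 slot (CVE-2017-5715 query) is unused here, while extra2 holds
 * the CVE-2018-3639 disable handler installed by the fast path above.
 */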
declare_cpu_ops_wa cortex_a76, CORTEX_A76_MIDR, \
	cortex_a76_reset_func, \
	CPU_NO_EXTRA1_FUNC, \
	cortex_a76_disable_wa_cve_2018_3639, \
	cortex_a76_core_pwr_dwn