xref: /rk3399_ARM-atf/lib/cpus/aarch64/cortex_a76.S (revision 5cc8c7ba1b24ace2ef7345e96d933141f3609817)
/*
 * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <common/bl_common.h>
#include <context.h>
#include <cortex_a76.h>
#include <cpu_macros.S>
#include <plat_macros.S>
#include <services/arm_arch_svc.h>

#if !DYNAMIC_WORKAROUND_CVE_2018_3639
#error Cortex A76 requires DYNAMIC_WORKAROUND_CVE_2018_3639=1
#endif

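/*
 * ESR_EL3 values corresponding to an SMC #0 taken from a lower EL:
 * EC = 0x17 (SMC executed in AArch64) or EC = 0x13 (SMC executed in AArch32),
 * with IL = 1 and ISS = 0.
 */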
#define ESR_EL3_A64_SMC0	0x5e000000
#define ESR_EL3_A32_SMC0	0x4e000000

	/*
	 * This macro applies the mitigation for CVE-2018-3639.
	 * It implements a fast path where `SMCCC_ARCH_WORKAROUND_2`
	 * SMC calls from a lower EL running in AArch32 or AArch64
	 * will go through the fast path and return early.
	 *
	 * The macro saves x2-x3 to the context.  In the fast path
	 * x0-x3 registers do not need to be restored as the calling
	 * context will have saved them.
	 */
	.macro apply_cve_2018_3639_wa _is_sync_exception _esr_el3_val
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]

	.if \_is_sync_exception
		/*
		 * Ensure SMC is coming from A64/A32 state on #0
		 * with W0 = SMCCC_ARCH_WORKAROUND_2
		 *
		 * This sequence evaluates as:
		 *    (W0==SMCCC_ARCH_WORKAROUND_2) ? (ESR_EL3==SMC#0) : (NE)
		 * allowing use of a single branch operation
		 */
		orr	w2, wzr, #SMCCC_ARCH_WORKAROUND_2
		cmp	x0, x2
		mrs	x3, esr_el3
		mov_imm	w2, \_esr_el3_val
		ccmp	w2, w3, #0, eq
		/*
		 * Static predictor will predict a fall-through, optimizing
		 * the `SMCCC_ARCH_WORKAROUND_2` fast path.
		 */
		bne	1f

		/*
		 * The sequence below implements the `SMCCC_ARCH_WORKAROUND_2`
		 * fast path.
		 */
		cmp	x1, xzr /* enable/disable check */

		/*
		 * When the calling context wants the mitigation disabled,
		 * we program the mitigation disable function in the
		 * CPU context, which gets invoked on subsequent exits from
		 * EL3 via the `el3_exit` function.  Otherwise NULL is
		 * programmed in the CPU context, which results in the caller
		 * inheriting the EL3 mitigation state (enabled) on subsequent
		 * `el3_exit`.
		 */
		mov	x0, xzr
		adr	x1, cortex_a76_disable_wa_cve_2018_3639
		csel	x1, x1, x0, eq
		str	x1, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]

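		/*
		 * Apply the requested state to the hardware now: set the
		 * DISABLE_LOAD_PASS_STORE bit when the caller asked for the
		 * mitigation to be enabled, clear it when it asked for it to
		 * be disabled, then return directly to the caller.
		 */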
		mrs	x2, CORTEX_A76_CPUACTLR2_EL1
		orr	x1, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
		bic	x3, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
		csel	x3, x3, x1, eq
		msr	CORTEX_A76_CPUACTLR2_EL1, x3
		eret	/* ERET implies ISB */
	.endif
1:
	/*
	 * Always enable v4 mitigation during EL3 execution.  This is not
	 * required for the fast path above because it does not perform any
	 * memory loads.
	 */
	mrs	x2, CORTEX_A76_CPUACTLR2_EL1
	orr	x2, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
	msr	CORTEX_A76_CPUACTLR2_EL1, x2
	isb

	/*
	 * The caller may have passed arguments to EL3 via x2-x3.
	 * Restore these registers from the context before jumping to the
	 * main runtime vector table entry.
	 */
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	.endm

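	/* ---------------------------------------------------------------------
	 * This vector table is installed as VBAR_EL3 by cortex_a76_reset_func
	 * (BL31 only) so that the CVE-2018-3639 mitigation is applied on every
	 * exception entry from a lower EL.
	 * ---------------------------------------------------------------------
	 */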
vector_base cortex_a76_wa_cve_2018_3639_a76_vbar

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry cortex_a76_sync_exception_sp_el0
	b	sync_exception_sp_el0
end_vector_entry cortex_a76_sync_exception_sp_el0

vector_entry cortex_a76_irq_sp_el0
	b	irq_sp_el0
end_vector_entry cortex_a76_irq_sp_el0

vector_entry cortex_a76_fiq_sp_el0
	b	fiq_sp_el0
end_vector_entry cortex_a76_fiq_sp_el0

vector_entry cortex_a76_serror_sp_el0
	b	serror_sp_el0
end_vector_entry cortex_a76_serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry cortex_a76_sync_exception_sp_elx
	b	sync_exception_sp_elx
end_vector_entry cortex_a76_sync_exception_sp_elx

vector_entry cortex_a76_irq_sp_elx
	b	irq_sp_elx
end_vector_entry cortex_a76_irq_sp_elx

vector_entry cortex_a76_fiq_sp_elx
	b	fiq_sp_elx
end_vector_entry cortex_a76_fiq_sp_elx

vector_entry cortex_a76_serror_sp_elx
	b	serror_sp_elx
end_vector_entry cortex_a76_serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry cortex_a76_sync_exception_aarch64
	apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A64_SMC0
	b	sync_exception_aarch64
end_vector_entry cortex_a76_sync_exception_aarch64

vector_entry cortex_a76_irq_aarch64
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
	b	irq_aarch64
end_vector_entry cortex_a76_irq_aarch64

vector_entry cortex_a76_fiq_aarch64
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
	b	fiq_aarch64
end_vector_entry cortex_a76_fiq_aarch64

vector_entry cortex_a76_serror_aarch64
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
	b	serror_aarch64
end_vector_entry cortex_a76_serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry cortex_a76_sync_exception_aarch32
	apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A32_SMC0
	b	sync_exception_aarch32
end_vector_entry cortex_a76_sync_exception_aarch32

vector_entry cortex_a76_irq_aarch32
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
	b	irq_aarch32
end_vector_entry cortex_a76_irq_aarch32

vector_entry cortex_a76_fiq_aarch32
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
	b	fiq_aarch32
end_vector_entry cortex_a76_fiq_aarch32

vector_entry cortex_a76_serror_aarch32
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
	b	serror_aarch32
end_vector_entry cortex_a76_serror_aarch32

	/* --------------------------------------------------
	 * Errata Workaround for Cortex A76 Errata #1130799.
	 * This applies only to revision <= r2p0 of Cortex A76.
	 * Inputs:
	 * x0: variant[4:7] and revision[0:3] of current cpu.
	 * Shall clobber: x0-x17
	 * --------------------------------------------------
	 */
func errata_a76_1130799_wa
	/*
	 * Compare x0 against revision r2p0
	 */
	mov	x17, x30
	bl	check_errata_1130799
	cbz	x0, 1f
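	/* The workaround sets bit 59 of CPUACTLR2_EL1 */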
	mrs	x1, CORTEX_A76_CPUACTLR2_EL1
	orr	x1, x1, #(1 << 59)
	msr	CORTEX_A76_CPUACTLR2_EL1, x1
	isb
1:
	ret	x17
endfunc errata_a76_1130799_wa

func check_errata_1130799
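	/* 0x20 encodes r2p0: variant in bits [7:4], revision in bits [3:0] */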
	mov	x1, #0x20
	b	cpu_rev_var_ls
endfunc check_errata_1130799

	/* --------------------------------------------------
	 * Errata Workaround for Cortex A76 Errata #1220197.
	 * This applies only to revision <= r2p0 of Cortex A76.
	 * Inputs:
	 * x0: variant[4:7] and revision[0:3] of current cpu.
	 * Shall clobber: x0-x17
	 * --------------------------------------------------
	 */
func errata_a76_1220197_wa
	/*
	 * Compare x0 against revision r2p0
	 */
	mov	x17, x30
	bl	check_errata_1220197
	cbz	x0, 1f
	mrs	x1, CORTEX_A76_CPUECTLR_EL1
	orr	x1, x1, #CORTEX_A76_CPUECTLR_EL1_WS_THR_L2
	msr	CORTEX_A76_CPUECTLR_EL1, x1
	isb
1:
	ret	x17
endfunc errata_a76_1220197_wa

func check_errata_1220197
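	/* 0x20 encodes r2p0: variant in bits [7:4], revision in bits [3:0] */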
	mov	x1, #0x20
	b	cpu_rev_var_ls
endfunc check_errata_1220197

func check_errata_cve_2018_3639
#if WORKAROUND_CVE_2018_3639
	mov	x0, #ERRATA_APPLIES
#else
	mov	x0, #ERRATA_MISSING
#endif
	ret
endfunc check_errata_cve_2018_3639

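	/* --------------------------------------------------
	 * Disable the CVE-2018-3639 mitigation by clearing
	 * the load-pass-store disable bit in CPUACTLR2_EL1.
	 * Shall clobber: x0
	 * --------------------------------------------------
	 */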
func cortex_a76_disable_wa_cve_2018_3639
	mrs	x0, CORTEX_A76_CPUACTLR2_EL1
	bic	x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
	msr	CORTEX_A76_CPUACTLR2_EL1, x0
	isb
	ret
endfunc cortex_a76_disable_wa_cve_2018_3639

	/* -------------------------------------------------
	 * The CPU Ops reset function for Cortex-A76.
	 * Shall clobber: x0-x19
	 * -------------------------------------------------
	 */
func cortex_a76_reset_func
	mov	x19, x30
	bl	cpu_get_rev_var
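	/* Preserve the revision/variant value in x18 for the errata calls below */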
	mov	x18, x0

#if ERRATA_A76_1130799
	mov	x0, x18
	bl	errata_a76_1130799_wa
#endif

#if ERRATA_A76_1220197
	mov	x0, x18
	bl	errata_a76_1220197_wa
#endif

#if WORKAROUND_CVE_2018_3639
	/* If the PE implements SSBS, we don't need the dynamic workaround */
	mrs	x0, id_aa64pfr1_el1
	lsr	x0, x0, #ID_AA64PFR1_EL1_SSBS_SHIFT
	and	x0, x0, #ID_AA64PFR1_EL1_SSBS_MASK
	cbnz	x0, 1f

	mrs	x0, CORTEX_A76_CPUACTLR2_EL1
	orr	x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
	msr	CORTEX_A76_CPUACTLR2_EL1, x0
	isb

#ifdef IMAGE_BL31
	/*
	 * The Cortex-A76 generic vectors are overwritten to use the vectors
	 * defined above.  This is required in order to apply mitigation
	 * against CVE-2018-3639 on exception entry from lower ELs.
	 */
	adr	x0, cortex_a76_wa_cve_2018_3639_a76_vbar
	msr	vbar_el3, x0
	isb
#endif

1:
#endif

#if ERRATA_DSU_936184
	bl	errata_dsu_936184_wa
#endif
	ret	x19
endfunc cortex_a76_reset_func

	/* ---------------------------------------------
	 * HW will do the cache maintenance while powering down
	 * ---------------------------------------------
	 */
func cortex_a76_core_pwr_dwn
	/* ---------------------------------------------
	 * Enable CPU power down bit in power control register
	 * ---------------------------------------------
	 */
	mrs	x0, CORTEX_A76_CPUPWRCTLR_EL1
	orr	x0, x0, #CORTEX_A76_CORE_PWRDN_EN_MASK
	msr	CORTEX_A76_CPUPWRCTLR_EL1, x0
	isb
	ret
endfunc cortex_a76_core_pwr_dwn

#if REPORT_ERRATA
/*
 * Errata printing function for Cortex-A76. Must follow AAPCS.
 */
func cortex_a76_errata_report
	stp	x8, x30, [sp, #-16]!

	bl	cpu_get_rev_var
	mov	x8, x0

	/*
	 * Report all errata. The revision-variant information is passed to
	 * the checking function of each erratum.
	 */
	report_errata ERRATA_A76_1130799, cortex_a76, 1130799
	report_errata ERRATA_A76_1220197, cortex_a76, 1220197
	report_errata WORKAROUND_CVE_2018_3639, cortex_a76, cve_2018_3639
	report_errata ERRATA_DSU_936184, cortex_a76, dsu_936184

	ldp	x8, x30, [sp], #16
	ret
endfunc cortex_a76_errata_report
#endif

	/* ---------------------------------------------
	 * This function provides cortex_a76 specific
	 * register information for crash reporting.
	 * It needs to return with x6 pointing to
	 * a list of register names in ascii and
	 * x8 - x15 having values of registers to be
	 * reported.
	 * ---------------------------------------------
	 */
.section .rodata.cortex_a76_regs, "aS"
cortex_a76_regs:  /* The ascii list of register names to be reported */
	.asciz	"cpuectlr_el1", ""

func cortex_a76_cpu_reg_dump
	adr	x6, cortex_a76_regs
	mrs	x8, CORTEX_A76_CPUECTLR_EL1
	ret
endfunc cortex_a76_cpu_reg_dump

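	/*
	 * Register the Cortex-A76 CPU ops, including the CVE-2018-3639
	 * mitigation disable function used by the dynamic workaround.
	 */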
declare_cpu_ops_wa cortex_a76, CORTEX_A76_MIDR, \
	cortex_a76_reset_func, \
	CPU_NO_EXTRA1_FUNC, \
	cortex_a76_disable_wa_cve_2018_3639, \
	cortex_a76_core_pwr_dwn
