xref: /rk3399_ARM-atf/lib/cpus/aarch64/cortex_a76.S (revision 034a2e3ef8a9e8e58f7cb7fab6db4ee60b2f9c29)
/*
 * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <common/bl_common.h>
#include <cortex_a76.h>
#include <cpu_macros.S>
#include <plat_macros.S>
#include <services/arm_arch_svc.h>
#include "wa_cve_2022_23960_bhb.S"

/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
#error "Cortex-A76 must be compiled with HW_ASSISTED_COHERENCY enabled"
#endif
	.globl cortex_a76_reset_func
	.globl cortex_a76_core_pwr_dwn
	.globl cortex_a76_disable_wa_cve_2018_3639

/* 64-bit only core */
#if CTX_INCLUDE_AARCH32_REGS == 1
#error "Cortex-A76 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif

#define ESR_EL3_A64_SMC0	0x5e000000
#define ESR_EL3_A32_SMC0	0x4e000000
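/*
 * Note: these are the ESR_EL3 values expected for an SMC #0 trapped to EL3:
 * EC = 0x17 (SMC from AArch64) or EC = 0x13 (SMC from AArch32) in bits
 * [31:26], the IL bit (bit 25) set for a 32-bit instruction, and a zero ISS,
 * giving 0x5e000000 and 0x4e000000 respectively.
 */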

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	/*
	 * This macro applies the mitigation for CVE-2018-3639.
	 * It implements a fast path where `SMCCC_ARCH_WORKAROUND_2`
	 * SMC calls from a lower EL running in AArch32 or AArch64
	 * will take the fast path and return early.
	 *
	 * The macro saves x2-x3 to the context. In the fast path
	 * x0-x3 registers do not need to be restored as the calling
	 * context will have saved them. The macro also saves
	 * x29-x30 to the context in the sync_exception path.
	 */
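	/*
	 * Note on register use: x29 and x30 are saved around the
	 * sync-exception path because apply_cve_2018_3639_sync_wa is reached
	 * via BL (clobbering x30) and may leave EL3 directly through
	 * exception_return on the fast path; both registers are restored
	 * from the context on either path.
	 */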
	.macro apply_cve_2018_3639_wa _is_sync_exception _esr_el3_val
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	.if \_is_sync_exception
	stp	x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	mov_imm	w2, \_esr_el3_val
	bl	apply_cve_2018_3639_sync_wa
	ldp	x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	.endif
	/*
	 * Always enable v4 mitigation during EL3 execution. This is not
	 * required for the fast path above because it does not perform any
	 * memory loads.
	 */
	mrs	x2, CORTEX_A76_CPUACTLR2_EL1
	orr	x2, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
	msr	CORTEX_A76_CPUACTLR2_EL1, x2
	isb

	/*
	 * The caller may have passed arguments to EL3 via x2-x3.
	 * Restore these registers from the context before jumping to the
	 * main runtime vector table entry.
	 */
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	.endm
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */

#if DYNAMIC_WORKAROUND_CVE_2018_3639 || WORKAROUND_CVE_2022_23960
vector_base cortex_a76_wa_cve_vbar
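
	/*
	 * This table mirrors the layout of the generic runtime exception
	 * vectors: the current-EL entries simply branch to the generic
	 * handlers, while the lower-EL entries apply the CVE mitigations
	 * below before tail-branching into the corresponding handler.
	 */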

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry cortex_a76_sync_exception_sp_el0
	b	sync_exception_sp_el0
end_vector_entry cortex_a76_sync_exception_sp_el0

vector_entry cortex_a76_irq_sp_el0
	b	irq_sp_el0
end_vector_entry cortex_a76_irq_sp_el0

vector_entry cortex_a76_fiq_sp_el0
	b	fiq_sp_el0
end_vector_entry cortex_a76_fiq_sp_el0

vector_entry cortex_a76_serror_sp_el0
	b	serror_sp_el0
end_vector_entry cortex_a76_serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry cortex_a76_sync_exception_sp_elx
	b	sync_exception_sp_elx
end_vector_entry cortex_a76_sync_exception_sp_elx

vector_entry cortex_a76_irq_sp_elx
	b	irq_sp_elx
end_vector_entry cortex_a76_irq_sp_elx

vector_entry cortex_a76_fiq_sp_elx
	b	fiq_sp_elx
end_vector_entry cortex_a76_fiq_sp_elx

vector_entry cortex_a76_serror_sp_elx
	b	serror_sp_elx
end_vector_entry cortex_a76_serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry cortex_a76_sync_exception_aarch64

#if WORKAROUND_CVE_2022_23960
	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
#endif /* WORKAROUND_CVE_2022_23960 */

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A64_SMC0
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */

	b	sync_exception_aarch64
end_vector_entry cortex_a76_sync_exception_aarch64

vector_entry cortex_a76_irq_aarch64

#if WORKAROUND_CVE_2022_23960
	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
#endif /* WORKAROUND_CVE_2022_23960 */

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */

	b	irq_aarch64
end_vector_entry cortex_a76_irq_aarch64

vector_entry cortex_a76_fiq_aarch64

#if WORKAROUND_CVE_2022_23960
	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
#endif /* WORKAROUND_CVE_2022_23960 */

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */

	b	fiq_aarch64
end_vector_entry cortex_a76_fiq_aarch64

vector_entry cortex_a76_serror_aarch64

#if WORKAROUND_CVE_2022_23960
	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
#endif /* WORKAROUND_CVE_2022_23960 */

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A64_SMC0
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */

	b	serror_aarch64
end_vector_entry cortex_a76_serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry cortex_a76_sync_exception_aarch32

#if WORKAROUND_CVE_2022_23960
	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
#endif /* WORKAROUND_CVE_2022_23960 */

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	apply_cve_2018_3639_wa _is_sync_exception=1 _esr_el3_val=ESR_EL3_A32_SMC0
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */

	b	sync_exception_aarch32
end_vector_entry cortex_a76_sync_exception_aarch32

vector_entry cortex_a76_irq_aarch32

#if WORKAROUND_CVE_2022_23960
	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
#endif /* WORKAROUND_CVE_2022_23960 */

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */

	b	irq_aarch32
end_vector_entry cortex_a76_irq_aarch32

vector_entry cortex_a76_fiq_aarch32

#if WORKAROUND_CVE_2022_23960
	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
#endif /* WORKAROUND_CVE_2022_23960 */

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */

	b	fiq_aarch32
end_vector_entry cortex_a76_fiq_aarch32

vector_entry cortex_a76_serror_aarch32

#if WORKAROUND_CVE_2022_23960
	apply_cve_2022_23960_bhb_wa CORTEX_A76_BHB_LOOP_COUNT
#endif /* WORKAROUND_CVE_2022_23960 */

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */

	b	serror_aarch32
end_vector_entry cortex_a76_serror_aarch32
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 || WORKAROUND_CVE_2022_23960 */

#if DYNAMIC_WORKAROUND_CVE_2018_3639
	/*
	 * -----------------------------------------------------------------
	 * This function applies the mitigation for CVE-2018-3639
	 * specifically for sync exceptions. It implements a fast path
	 * where `SMCCC_ARCH_WORKAROUND_2` SMC calls from a lower EL
	 * running in AArch64 will take the fast path and return early.
	 *
	 * In the fast path x0-x3 registers do not need to be restored as the
	 * calling context will have saved them.
	 *
	 * Caller must pass value of esr_el3 to compare via x2.
	 * Save and restore these registers outside of this function from the
	 * context before jumping to the main runtime vector table entry.
	 *
	 * Shall clobber: x0-x3, x30
	 * -----------------------------------------------------------------
	 */
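	/*
	 * Roughly, in C-like pseudocode (illustrative only, names abridged):
	 *
	 *   if (w0 == SMCCC_ARCH_WORKAROUND_2 && esr_el3 == w2) {
	 *       // x1 == 0 means the caller asked for the mitigation off
	 *       ctx->cve_2018_3639_disable = (x1 == 0) ?
	 *               &cortex_a76_disable_wa_cve_2018_3639 : NULL;
	 *       CPUACTLR2_EL1.DISABLE_LOAD_PASS_STORE = (x1 != 0);
	 *       exception_return();    // fast path, does not return here
	 *   }
	 *   return;                    // slow path, back to the caller
	 */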
func apply_cve_2018_3639_sync_wa
	/*
	 * Ensure SMC is coming from A64/A32 state on #0
	 * with W0 = SMCCC_ARCH_WORKAROUND_2
	 *
	 * This sequence evaluates as:
	 *    (W0==SMCCC_ARCH_WORKAROUND_2) ? (ESR_EL3==SMC#0) : (NE)
	 * allowing use of a single branch operation.
	 * X2 is populated by the caller with the expected ESR_EL3 value.
	 */
	orr	w3, wzr, #SMCCC_ARCH_WORKAROUND_2
	cmp	x0, x3
	mrs	x3, esr_el3

	ccmp	w2, w3, #0, eq
	/*
	 * Static predictor will predict a fall-through, optimizing
	 * the `SMCCC_ARCH_WORKAROUND_2` fast path.
	 */
	bne	1f

	/*
	 * The sequence below implements the `SMCCC_ARCH_WORKAROUND_2`
	 * fast path.
	 */
	cmp	x1, xzr /* enable/disable check */

	/*
	 * When the calling context wants mitigation disabled,
	 * we program the mitigation disable function in the
	 * CPU context, which gets invoked on subsequent exits from
	 * EL3 via the `el3_exit` function. Otherwise NULL is
	 * programmed in the CPU context, which results in the caller
	 * inheriting the EL3 mitigation state (enabled) on subsequent
	 * `el3_exit`.
	 */
	mov	x0, xzr
	adr	x1, cortex_a76_disable_wa_cve_2018_3639
	csel	x1, x1, x0, eq
	str	x1, [sp, #CTX_CVE_2018_3639_OFFSET + CTX_CVE_2018_3639_DISABLE]

	mrs	x2, CORTEX_A76_CPUACTLR2_EL1
	orr	x1, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
	bic	x3, x2, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
	csel	x3, x3, x1, eq
	msr	CORTEX_A76_CPUACTLR2_EL1, x3
	ldp	x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	/*
	 * `SMCCC_ARCH_WORKAROUND_2` fast path return to lower EL.
	 */
	exception_return /* exception_return contains ISB */
1:
	ret
endfunc apply_cve_2018_3639_sync_wa
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */

	/* --------------------------------------------------
	 * Errata Workaround for Cortex A76 Errata #1073348.
	 * This applies only to revision <= r1p0 of Cortex A76.
	 * Inputs:
	 * x0: variant[4:7] and revision[0:3] of current cpu.
	 * Shall clobber: x0-x17
	 * --------------------------------------------------
	 */
func errata_a76_1073348_wa
	/*
	 * Compare x0 against revision r1p0
	 */
	mov	x17, x30
	bl	check_errata_1073348
	cbz	x0, 1f
	mrs	x1, CORTEX_A76_CPUACTLR_EL1
	orr	x1, x1, #CORTEX_A76_CPUACTLR_EL1_DISABLE_STATIC_PREDICTION
	msr	CORTEX_A76_CPUACTLR_EL1, x1
	isb
1:
	ret	x17
endfunc errata_a76_1073348_wa

func check_errata_1073348
	mov	x1, #0x10
	b	cpu_rev_var_ls
endfunc check_errata_1073348
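	/*
	 * Note: the check_errata_* helpers in this file follow the
	 * cpu_rev_var_ls convention from cpu_helpers.S: x0 carries the
	 * (variant << 4 | revision) value returned by cpu_get_rev_var, x1
	 * carries the highest revision the erratum applies to (0x10 above is
	 * r1p0), and ERRATA_APPLIES or ERRATA_NOT_APPLIES comes back in x0.
	 */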

	/* --------------------------------------------------
	 * Errata Workaround for Cortex A76 Errata #1130799.
	 * This applies only to revision <= r2p0 of Cortex A76.
	 * Inputs:
	 * x0: variant[4:7] and revision[0:3] of current cpu.
	 * Shall clobber: x0-x17
	 * --------------------------------------------------
	 */
func errata_a76_1130799_wa
	/*
	 * Compare x0 against revision r2p0
	 */
	mov	x17, x30
	bl	check_errata_1130799
	cbz	x0, 1f
	mrs	x1, CORTEX_A76_CPUACTLR2_EL1
	orr	x1, x1, #(1 << 59)
	msr	CORTEX_A76_CPUACTLR2_EL1, x1
	isb
1:
	ret	x17
endfunc errata_a76_1130799_wa

func check_errata_1130799
	mov	x1, #0x20
	b	cpu_rev_var_ls
endfunc check_errata_1130799

	/* --------------------------------------------------
	 * Errata Workaround for Cortex A76 Errata #1220197.
	 * This applies only to revision <= r2p0 of Cortex A76.
	 * Inputs:
	 * x0: variant[4:7] and revision[0:3] of current cpu.
	 * Shall clobber: x0-x17
	 * --------------------------------------------------
	 */
func errata_a76_1220197_wa
	/*
	 * Compare x0 against revision r2p0
	 */
	mov	x17, x30
	bl	check_errata_1220197
	cbz	x0, 1f
	mrs	x1, CORTEX_A76_CPUECTLR_EL1
	orr	x1, x1, #CORTEX_A76_CPUECTLR_EL1_WS_THR_L2
	msr	CORTEX_A76_CPUECTLR_EL1, x1
	isb
1:
	ret	x17
endfunc errata_a76_1220197_wa

func check_errata_1220197
	mov	x1, #0x20
	b	cpu_rev_var_ls
endfunc check_errata_1220197

	/* --------------------------------------------------
	 * Errata Workaround for Cortex A76 Errata #1257314.
	 * This applies only to revision <= r3p0 of Cortex A76.
	 * Inputs:
	 * x0: variant[4:7] and revision[0:3] of current cpu.
	 * Shall clobber: x0-x17
	 * --------------------------------------------------
	 */
func errata_a76_1257314_wa
	/*
	 * Compare x0 against revision r3p0
	 */
	mov	x17, x30
	bl	check_errata_1257314
	cbz	x0, 1f
	mrs	x1, CORTEX_A76_CPUACTLR3_EL1
	orr	x1, x1, CORTEX_A76_CPUACTLR3_EL1_BIT_10
	msr	CORTEX_A76_CPUACTLR3_EL1, x1
	isb
1:
	ret	x17
endfunc errata_a76_1257314_wa

func check_errata_1257314
	mov	x1, #0x30
	b	cpu_rev_var_ls
endfunc check_errata_1257314

	/* --------------------------------------------------
	 * Errata Workaround for Cortex A76 Errata #1262888.
	 * This applies only to revision <= r3p0 of Cortex A76.
	 * Inputs:
	 * x0: variant[4:7] and revision[0:3] of current cpu.
	 * Shall clobber: x0-x17
	 * --------------------------------------------------
	 */
func errata_a76_1262888_wa
	/*
	 * Compare x0 against revision r3p0
	 */
	mov	x17, x30
	bl	check_errata_1262888
	cbz	x0, 1f
	mrs	x1, CORTEX_A76_CPUECTLR_EL1
	orr	x1, x1, CORTEX_A76_CPUECTLR_EL1_BIT_51
	msr	CORTEX_A76_CPUECTLR_EL1, x1
	isb
1:
	ret	x17
endfunc errata_a76_1262888_wa

func check_errata_1262888
	mov	x1, #0x30
	b	cpu_rev_var_ls
endfunc check_errata_1262888

	/* ---------------------------------------------------
	 * Errata Workaround for Cortex A76 Errata #1286807.
	 * This applies only to revision <= r3p0 of Cortex A76.
	 * Due to the nature of the errata it is applied unconditionally
	 * when built in; report it as applicable in that case.
	 * ---------------------------------------------------
	 */
func check_errata_1286807
#if ERRATA_A76_1286807
	mov	x0, #ERRATA_APPLIES
	ret
#else
	mov	x1, #0x30
	b	cpu_rev_var_ls
#endif
endfunc check_errata_1286807

	/* --------------------------------------------------
	 * Errata workaround for Cortex A76 Errata #1791580.
	 * This applies to revisions <= r4p0 of Cortex A76.
	 * Inputs:
	 * x0: variant[4:7] and revision[0:3] of current cpu.
	 * Shall clobber: x0-x17
	 * --------------------------------------------------
	 */
func errata_a76_1791580_wa
	/* Compare x0 against revision r4p0 */
	mov	x17, x30
	bl	check_errata_1791580
	cbz	x0, 1f
	mrs	x1, CORTEX_A76_CPUACTLR2_EL1
	orr	x1, x1, CORTEX_A76_CPUACTLR2_EL1_BIT_2
	msr	CORTEX_A76_CPUACTLR2_EL1, x1
	isb
1:
	ret	x17
endfunc errata_a76_1791580_wa

func check_errata_1791580
	/* Applies to everything <= r4p0. */
	mov	x1, #0x40
	b	cpu_rev_var_ls
endfunc check_errata_1791580

	/* --------------------------------------------------
	 * Errata Workaround for Cortex A76 Errata #1262606,
	 * #1275112, and #1868343. #1262606 and #1275112
	 * apply to revisions <= r3p0 and #1868343 applies to
	 * revisions <= r4p0.
	 * Inputs:
	 * x0: variant[4:7] and revision[0:3] of current cpu.
	 * Shall clobber: x0-x17
	 * --------------------------------------------------
	 */

func errata_a76_1262606_1275112_1868343_wa
	mov	x17, x30

/* Check for <= r3p0 cases and branch if check passes. */
#if ERRATA_A76_1262606 || ERRATA_A76_1275112
	bl	check_errata_1262606
	cbnz	x0, 1f
#endif

/* Check for <= r4p0 cases and branch if check fails. */
#if ERRATA_A76_1868343
	bl	check_errata_1868343
	cbz	x0, 2f
#endif
1:
	mrs	x1, CORTEX_A76_CPUACTLR_EL1
	orr	x1, x1, #CORTEX_A76_CPUACTLR_EL1_BIT_13
	msr	CORTEX_A76_CPUACTLR_EL1, x1
	isb
2:
	ret	x17
endfunc errata_a76_1262606_1275112_1868343_wa

func check_errata_1262606
	mov	x1, #0x30
	b	cpu_rev_var_ls
endfunc check_errata_1262606

func check_errata_1275112
	mov	x1, #0x30
	b	cpu_rev_var_ls
endfunc check_errata_1275112

func check_errata_1868343
	mov	x1, #0x40
	b	cpu_rev_var_ls
endfunc check_errata_1868343

/* --------------------------------------------------
 * Errata Workaround for A76 Erratum 1946160.
 * This applies to revisions r3p0 - r4p1 of A76.
 * It also exists in r0p0 - r2p0 but there is no fix
 * in those revisions.
 * Inputs:
 * x0: variant[4:7] and revision[0:3] of current cpu.
 * Shall clobber: x0-x17
 * --------------------------------------------------
 */
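/*
 * The MSR sequences below target IMPLEMENTATION DEFINED registers
 * (S3_6_C15_C8_n, i.e. op0=3, op1=6, CRn=15, CRm=8). The encoded values are
 * the erratum-specific patch data for this workaround and have no
 * architectural meaning.
 */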
func errata_a76_1946160_wa
	/* Compare x0 against revisions r3p0 - r4p1 */
	mov	x17, x30
	bl	check_errata_1946160
	cbz	x0, 1f

	mov	x0, #3
	msr	S3_6_C15_C8_0, x0
	ldr	x0, =0x10E3900002
	msr	S3_6_C15_C8_2, x0
	ldr	x0, =0x10FFF00083
	msr	S3_6_C15_C8_3, x0
	ldr	x0, =0x2001003FF
	msr	S3_6_C15_C8_1, x0

	mov	x0, #4
	msr	S3_6_C15_C8_0, x0
	ldr	x0, =0x10E3800082
	msr	S3_6_C15_C8_2, x0
	ldr	x0, =0x10FFF00083
	msr	S3_6_C15_C8_3, x0
	ldr	x0, =0x2001003FF
	msr	S3_6_C15_C8_1, x0

	mov	x0, #5
	msr	S3_6_C15_C8_0, x0
	ldr	x0, =0x10E3800200
	msr	S3_6_C15_C8_2, x0
	ldr	x0, =0x10FFF003E0
	msr	S3_6_C15_C8_3, x0
	ldr	x0, =0x2001003FF
	msr	S3_6_C15_C8_1, x0

	isb
1:
	ret	x17
endfunc errata_a76_1946160_wa

func check_errata_1946160
	/* Applies to revisions r3p0 - r4p1. */
	mov	x1, #0x30
	mov	x2, #0x41
	b	cpu_rev_var_range
endfunc check_errata_1946160
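	/*
	 * Note: cpu_rev_var_range (cpu_helpers.S) reports ERRATA_APPLIES when
	 * x1 <= rev_var <= x2, so the bounds above select r3p0 to r4p1
	 * inclusive.
	 */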

	/* ----------------------------------------------------
	 * Errata Workaround for Cortex-A76 Errata #2743102
	 * This applies to revisions <= r4p1 and is still open.
	 * x0: variant[4:7] and revision[0:3] of current cpu.
	 * Shall clobber: x0-x17
	 * ----------------------------------------------------
	 */
func errata_a76_2743102_wa
	mov	x17, x30
	bl	check_errata_2743102
	cbz	x0, 1f

	/* dsb before isb of power down sequence */
	dsb	sy
1:
	ret	x17
endfunc errata_a76_2743102_wa

func check_errata_2743102
	/* Applies to all revisions <= r4p1 */
	mov	x1, #0x41
	b	cpu_rev_var_ls
endfunc check_errata_2743102

func check_errata_cve_2018_3639
#if WORKAROUND_CVE_2018_3639
	mov	x0, #ERRATA_APPLIES
#else
	mov	x0, #ERRATA_MISSING
#endif
	ret
endfunc check_errata_cve_2018_3639

func cortex_a76_disable_wa_cve_2018_3639
	mrs	x0, CORTEX_A76_CPUACTLR2_EL1
	bic	x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
	msr	CORTEX_A76_CPUACTLR2_EL1, x0
	isb
	ret
endfunc cortex_a76_disable_wa_cve_2018_3639
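	/*
	 * This is the routine whose address the sync-exception fast path
	 * stores at CTX_CVE_2018_3639_DISABLE in the CPU context; it is also
	 * registered as the extra2 hook in declare_cpu_ops_wa below, so the
	 * generic EL3 exit path can switch the mitigation off again for a
	 * caller that requested it disabled.
	 */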

	/* --------------------------------------------------------------
	 * Errata Workaround for Cortex A76 Errata #1165522.
	 * This applies only to revisions <= r3p0 of Cortex A76.
	 * Due to the nature of the errata it is applied unconditionally
	 * when built in; report it as applicable in that case.
	 * --------------------------------------------------------------
	 */
func check_errata_1165522
#if ERRATA_A76_1165522
	mov	x0, #ERRATA_APPLIES
	ret
#else
	mov	x1, #0x30
	b	cpu_rev_var_ls
#endif
endfunc check_errata_1165522

func check_errata_cve_2022_23960
#if WORKAROUND_CVE_2022_23960
	mov	x0, #ERRATA_APPLIES
#else
	mov	x0, #ERRATA_MISSING
#endif /* WORKAROUND_CVE_2022_23960 */
	ret
endfunc check_errata_cve_2022_23960

	/* -------------------------------------------------
	 * The CPU Ops reset function for Cortex-A76.
	 * Shall clobber: x0-x19
	 * -------------------------------------------------
	 */
func cortex_a76_reset_func
	mov	x19, x30
	bl	cpu_get_rev_var
	mov	x18, x0

#if ERRATA_A76_1073348
	mov	x0, x18
	bl	errata_a76_1073348_wa
#endif

#if ERRATA_A76_1130799
	mov	x0, x18
	bl	errata_a76_1130799_wa
#endif

#if ERRATA_A76_1220197
	mov	x0, x18
	bl	errata_a76_1220197_wa
#endif

#if ERRATA_A76_1257314
	mov	x0, x18
	bl	errata_a76_1257314_wa
#endif

#if ERRATA_A76_1262606 || ERRATA_A76_1275112 || ERRATA_A76_1868343
	mov	x0, x18
	bl	errata_a76_1262606_1275112_1868343_wa
#endif

#if ERRATA_A76_1262888
	mov	x0, x18
	bl	errata_a76_1262888_wa
#endif

#if ERRATA_A76_1791580
	mov	x0, x18
	bl	errata_a76_1791580_wa
#endif

#if ERRATA_A76_1946160
	mov	x0, x18
	bl	errata_a76_1946160_wa
#endif

#if WORKAROUND_CVE_2018_3639
	/* If the PE implements SSBS, we don't need the dynamic workaround */
	mrs	x0, id_aa64pfr1_el1
	lsr	x0, x0, #ID_AA64PFR1_EL1_SSBS_SHIFT
	and	x0, x0, #ID_AA64PFR1_EL1_SSBS_MASK
#if !DYNAMIC_WORKAROUND_CVE_2018_3639 && ENABLE_ASSERTIONS
	cmp	x0, 0
	ASM_ASSERT(ne)
#endif
#if DYNAMIC_WORKAROUND_CVE_2018_3639
	cbnz	x0, 1f
	mrs	x0, CORTEX_A76_CPUACTLR2_EL1
	orr	x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
	msr	CORTEX_A76_CPUACTLR2_EL1, x0
	isb

#ifdef IMAGE_BL31
	/*
	 * The Cortex-A76 generic vectors are overwritten to use the vectors
	 * defined above. This is required in order to apply mitigation
	 * against CVE-2018-3639 on exception entry from lower ELs.
	 * If this vector table is used, skip overriding it again for
	 * CVE-2022-23960, as both use the same vbar.
	 */
	adr	x0, cortex_a76_wa_cve_vbar
	msr	vbar_el3, x0
	isb
	b	2f
#endif /* IMAGE_BL31 */

1:
#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
#endif /* WORKAROUND_CVE_2018_3639 */

#if IMAGE_BL31 && WORKAROUND_CVE_2022_23960
	/*
	 * The Cortex-A76 generic vectors are overridden to apply errata
	 * mitigation on exception entry from lower ELs. This will be bypassed
	 * if DYNAMIC_WORKAROUND_CVE_2018_3639 has overridden the vectors.
	 */
	adr	x0, cortex_a76_wa_cve_vbar
	msr	vbar_el3, x0
	isb
#endif /* IMAGE_BL31 && WORKAROUND_CVE_2022_23960 */
2:

#if ERRATA_DSU_798953
	bl	errata_dsu_798953_wa
#endif

#if ERRATA_DSU_936184
	bl	errata_dsu_936184_wa
#endif

	ret	x19
endfunc cortex_a76_reset_func

	/* ---------------------------------------------
	 * HW will do the cache maintenance while powering down
	 * ---------------------------------------------
	 */
func cortex_a76_core_pwr_dwn
	/* ---------------------------------------------
	 * Enable CPU power down bit in power control register
	 * ---------------------------------------------
	 */
	mrs	x0, CORTEX_A76_CPUPWRCTLR_EL1
	orr	x0, x0, #CORTEX_A76_CORE_PWRDN_EN_MASK
	msr	CORTEX_A76_CPUPWRCTLR_EL1, x0
#if ERRATA_A76_2743102
	mov	x15, x30
	bl	cpu_get_rev_var
	bl	errata_a76_2743102_wa
	mov	x30, x15
#endif /* ERRATA_A76_2743102 */
	isb
	ret
endfunc cortex_a76_core_pwr_dwn

#if REPORT_ERRATA
/*
 * Errata printing function for Cortex A76. Must follow AAPCS.
 */
func cortex_a76_errata_report
	stp	x8, x30, [sp, #-16]!

	bl	cpu_get_rev_var
	mov	x8, x0

	/*
	 * Report all errata. The revision-variant information is passed to
	 * the checking function of each erratum.
	 */
	report_errata ERRATA_A76_1073348, cortex_a76, 1073348
	report_errata ERRATA_A76_1130799, cortex_a76, 1130799
	report_errata ERRATA_A76_1165522, cortex_a76, 1165522
	report_errata ERRATA_A76_1220197, cortex_a76, 1220197
	report_errata ERRATA_A76_1257314, cortex_a76, 1257314
	report_errata ERRATA_A76_1262606, cortex_a76, 1262606
	report_errata ERRATA_A76_1262888, cortex_a76, 1262888
	report_errata ERRATA_A76_1275112, cortex_a76, 1275112
	report_errata ERRATA_A76_1286807, cortex_a76, 1286807
	report_errata ERRATA_A76_1791580, cortex_a76, 1791580
	report_errata ERRATA_A76_1868343, cortex_a76, 1868343
	report_errata ERRATA_A76_1946160, cortex_a76, 1946160
	report_errata ERRATA_A76_2743102, cortex_a76, 2743102
	report_errata WORKAROUND_CVE_2018_3639, cortex_a76, cve_2018_3639
	report_errata ERRATA_DSU_798953, cortex_a76, dsu_798953
	report_errata ERRATA_DSU_936184, cortex_a76, dsu_936184
	report_errata WORKAROUND_CVE_2022_23960, cortex_a76, cve_2022_23960

	ldp	x8, x30, [sp], #16
	ret
endfunc cortex_a76_errata_report
#endif

	/* ---------------------------------------------
	 * This function provides cortex_a76 specific
	 * register information for crash reporting.
	 * It needs to return with x6 pointing to
	 * a list of register names in ascii and
	 * x8 - x15 having values of registers to be
	 * reported.
	 * ---------------------------------------------
	 */
.section .rodata.cortex_a76_regs, "aS"
cortex_a76_regs:  /* The ascii list of register names to be reported */
	.asciz	"cpuectlr_el1", ""

func cortex_a76_cpu_reg_dump
	adr	x6, cortex_a76_regs
	mrs	x8, CORTEX_A76_CPUECTLR_EL1
	ret
endfunc cortex_a76_cpu_reg_dump

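/*
 * declare_cpu_ops_wa wires the handlers above into this CPU's cpu_ops entry:
 * MIDR, reset handler, the three "extra" workaround hooks (only extra2 is
 * used here, carrying the CVE-2018-3639 disable routine), and the core
 * power-down handler.
 */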
declare_cpu_ops_wa cortex_a76, CORTEX_A76_MIDR, \
	cortex_a76_reset_func, \
	CPU_NO_EXTRA1_FUNC, \
	cortex_a76_disable_wa_cve_2018_3639, \
	CPU_NO_EXTRA3_FUNC, \
	cortex_a76_core_pwr_dwn