/*
 * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <context.h>
#include <denver.h>
#include <cpu_macros.S>
#include <plat_macros.S>

	/* -------------------------------------------------
	 * CVE-2017-5715 mitigation
	 *
	 * Flush the indirect branch predictor and RSB on
	 * entry to EL3 by issuing a newly added instruction
	 * for Denver CPUs.
	 *
	 * To achieve this without performing any branch
	 * instruction, a per-CPU vbar is installed which
	 * executes the workaround and then branches off to
	 * the corresponding vector entry in the main vector
	 * table.
	 * -------------------------------------------------
	 */
vector_base workaround_bpflush_runtime_exceptions

	.macro	apply_workaround
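	/*
	 * On entry to EL3, SP_EL3 holds a pointer to the cpu_context of
	 * the interrupted world, so x0/x1 can be stashed in its GP
	 * register save area without touching the stack.
	 */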
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]

	/* Disable cycle counter when event counting is prohibited */
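	/*
	 * PMCR_EL0.DP stops PMCCNTR_EL0 whenever event counting is
	 * prohibited, so a lower EL cannot use the cycle counter to
	 * time this EL3 path.
	 */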
	mrs	x1, pmcr_el0
	orr	x0, x1, #PMCR_EL0_DP_BIT
	msr	pmcr_el0, x0
	isb

	/* -------------------------------------------------
	 * A new write-only system register where a write of
	 * 1 to bit 0 will cause the indirect branch predictor
	 * and RSB to be flushed.
	 *
	 * A write of 0 to bit 0 will be ignored. A write of
	 * 1 to any other bit will cause an MCA.
	 * -------------------------------------------------
	 */
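	/*
	 * Encoding s3_0_c15_c0_6 = op0=3, op1=0, CRn=15, CRm=0, op2=6,
	 * i.e. a register in the IMPLEMENTATION DEFINED space.
	 */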
	mov	x0, #1
	msr	s3_0_c15_c0_6, x0
	isb

	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	.endm

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
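	/*
	 * Exceptions taken from the current EL originate in EL3 itself,
	 * so these entries branch straight to the default handlers
	 * without applying the branch predictor flush.
	 */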
vector_entry workaround_bpflush_sync_exception_sp_el0
	b	sync_exception_sp_el0
end_vector_entry workaround_bpflush_sync_exception_sp_el0

vector_entry workaround_bpflush_irq_sp_el0
	b	irq_sp_el0
end_vector_entry workaround_bpflush_irq_sp_el0

vector_entry workaround_bpflush_fiq_sp_el0
	b	fiq_sp_el0
end_vector_entry workaround_bpflush_fiq_sp_el0

vector_entry workaround_bpflush_serror_sp_el0
	b	serror_sp_el0
end_vector_entry workaround_bpflush_serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpflush_sync_exception_sp_elx
	b	sync_exception_sp_elx
end_vector_entry workaround_bpflush_sync_exception_sp_elx

vector_entry workaround_bpflush_irq_sp_elx
	b	irq_sp_elx
end_vector_entry workaround_bpflush_irq_sp_elx

vector_entry workaround_bpflush_fiq_sp_elx
	b	fiq_sp_elx
end_vector_entry workaround_bpflush_fiq_sp_elx

vector_entry workaround_bpflush_serror_sp_elx
	b	serror_sp_elx
end_vector_entry workaround_bpflush_serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpflush_sync_exception_aarch64
	apply_workaround
	b	sync_exception_aarch64
end_vector_entry workaround_bpflush_sync_exception_aarch64

vector_entry workaround_bpflush_irq_aarch64
	apply_workaround
	b	irq_aarch64
end_vector_entry workaround_bpflush_irq_aarch64

vector_entry workaround_bpflush_fiq_aarch64
	apply_workaround
	b	fiq_aarch64
end_vector_entry workaround_bpflush_fiq_aarch64

vector_entry workaround_bpflush_serror_aarch64
	apply_workaround
	b	serror_aarch64
end_vector_entry workaround_bpflush_serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_bpflush_sync_exception_aarch32
	apply_workaround
	b	sync_exception_aarch32
end_vector_entry workaround_bpflush_sync_exception_aarch32

vector_entry workaround_bpflush_irq_aarch32
	apply_workaround
	b	irq_aarch32
end_vector_entry workaround_bpflush_irq_aarch32

vector_entry workaround_bpflush_fiq_aarch32
	apply_workaround
	b	fiq_aarch32
end_vector_entry workaround_bpflush_fiq_aarch32

vector_entry workaround_bpflush_serror_aarch32
	apply_workaround
	b	serror_aarch32
end_vector_entry workaround_bpflush_serror_aarch32

	.global	denver_disable_dco

	/* ---------------------------------------------
	 * Disable debug interfaces
	 * ---------------------------------------------
	 */
func denver_disable_ext_debug
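	/*
	 * Set OSDLR_EL1.DLK to engage the OS Double Lock, forcing the
	 * external debug interfaces quiescent.
	 */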
	mov	x0, #1
	msr	osdlr_el1, x0
	isb
	dsb	sy
	ret
endfunc denver_disable_ext_debug

	/* ----------------------------------------------------
	 * Enable dynamic code optimizer (DCO)
	 * ----------------------------------------------------
	 */
func denver_enable_dco
	mov	x18, x30
	bl	plat_my_core_pos
	mov	x1, #1
	lsl	x1, x1, x0
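	/*
	 * s3_0_c15_c0_2 is an IMPLEMENTATION DEFINED Denver register;
	 * judging by the disable sequence below, bit <core_pos> of
	 * bits [15:0] requests DCO enable for this core.
	 */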
	msr	s3_0_c15_c0_2, x1
	mov	x30, x18
	ret
endfunc denver_enable_dco

	/* ----------------------------------------------------
	 * Disable dynamic code optimizer (DCO)
	 * ----------------------------------------------------
	 */
func denver_disable_dco

	mov	x18, x30

	/* turn off background work */
	bl	plat_my_core_pos
	mov	x1, #1
	lsl	x1, x1, x0
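	/*
	 * The disable request is the per-core enable mask shifted up
	 * into bits [31:16] of the same register.
	 */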
	lsl	x2, x1, #16
	msr	s3_0_c15_c0_2, x2
	isb

	/* wait until the background work turns off */
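	/*
	 * Bits [47:32] report outstanding background work; spin until
	 * this core's status bit reads as clear.
	 */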
1:	mrs	x2, s3_0_c15_c0_2
	lsr	x2, x2, #32
	and	w2, w2, #0xFFFF
	and	x2, x2, x1
	cbnz	x2, 1b

	mov	x30, x18
	ret
endfunc denver_disable_dco

func check_errata_cve_2017_5715
	mov	x0, #ERRATA_MISSING
#if WORKAROUND_CVE_2017_5715
	/*
	 * Check if the CPU supports the special instruction
	 * required to flush the indirect branch predictor and
	 * RSB. Support for this operation can be determined by
	 * comparing bits 19:16 of ID_AFR0_EL1 with 0b0001.
	 */
	mrs	x1, id_afr0_el1
	mov	x2, #0x10000
	and	x1, x1, x2
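	/*
	 * Only bit 16 of the [19:16] field is tested here; this is
	 * equivalent to the 0b0001 comparison as long as 0b0000 and
	 * 0b0001 are the only values the field takes.
	 */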
	cbz	x1, 1f
	mov	x0, #ERRATA_APPLIES
1:
#endif
	ret
endfunc check_errata_cve_2017_5715

func check_errata_cve_2018_3639
#if WORKAROUND_CVE_2018_3639
	mov	x0, #ERRATA_APPLIES
#else
	mov	x0, #ERRATA_MISSING
#endif
	ret
endfunc check_errata_cve_2018_3639

	/* -------------------------------------------------
	 * The CPU Ops reset function for Denver.
	 * -------------------------------------------------
	 */
func denver_reset_func

	mov	x19, x30

#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
	/*
	 * Check if the CPU supports the special instruction
	 * required to flush the indirect branch predictor and
	 * RSB. Support for this operation can be determined by
	 * comparing bits 19:16 of ID_AFR0_EL1 with 0b0001.
	 */
	mrs	x0, id_afr0_el1
	mov	x1, #0x10000
	and	x0, x0, x1
	cmp	x0, #0
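	/*
	 * Install the workaround vector table only when the flush
	 * instruction is implemented; otherwise retain the default
	 * vbar_el3.
	 */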
	adr	x1, workaround_bpflush_runtime_exceptions
	mrs	x2, vbar_el3
	csel	x0, x1, x2, ne
	msr	vbar_el3, x0
#endif

#if WORKAROUND_CVE_2018_3639
	/*
	 * Denver CPUs with DENVER_MIDR_PN3 or earlier use different
	 * bits in the ACTLR_EL3 register to disable the speculative
	 * store buffer and memory disambiguation.
	 */
	mrs	x0, midr_el1
	mov_imm	x1, DENVER_MIDR_PN4
	cmp	x0, x1
	mrs	x0, actlr_el3
	mov	x1, #(DENVER_CPU_DIS_MD_EL3 | DENVER_CPU_DIS_SSB_EL3)
	mov	x2, #(DENVER_PN4_CPU_DIS_MD_EL3 | DENVER_PN4_CPU_DIS_SSB_EL3)
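	/*
	 * csel picks the PN4 bit layout only when MIDR matches
	 * DENVER_MIDR_PN4 exactly; all other variants use the legacy
	 * bits.
	 */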
	csel	x3, x1, x2, ne
	orr	x0, x0, x3
	msr	actlr_el3, x0
	isb
	dsb	sy
#endif

	/* ----------------------------------------------------
	 * Reset ACTLR.PMSTATE to C1 state
	 * ----------------------------------------------------
	 */
	mrs	x0, actlr_el1
	bic	x0, x0, #DENVER_CPU_PMSTATE_MASK
	orr	x0, x0, #DENVER_CPU_PMSTATE_C1
	msr	actlr_el1, x0

	/* ----------------------------------------------------
	 * Enable dynamic code optimizer (DCO)
	 * ----------------------------------------------------
	 */
	bl	denver_enable_dco

	ret	x19
endfunc denver_reset_func

	/* ----------------------------------------------------
	 * The CPU Ops core power down function for Denver.
	 * ----------------------------------------------------
	 */
func denver_core_pwr_dwn

	mov	x19, x30

	/* ---------------------------------------------
	 * Force the debug interfaces to be quiescent
	 * ---------------------------------------------
	 */
	bl	denver_disable_ext_debug

	ret	x19
endfunc denver_core_pwr_dwn

	/* -------------------------------------------------------
	 * The CPU Ops cluster power down function for Denver.
	 * -------------------------------------------------------
	 */
func denver_cluster_pwr_dwn
	ret
endfunc denver_cluster_pwr_dwn

#if REPORT_ERRATA
	/*
	 * Errata printing function for Denver. Must follow AAPCS.
	 */
func denver_errata_report
	stp	x8, x30, [sp, #-16]!

	bl	cpu_get_rev_var
	mov	x8, x0

	/*
	 * Report all errata. The revision-variant information is passed
	 * to the checking function for each erratum.
	 */
	report_errata WORKAROUND_CVE_2017_5715, denver, cve_2017_5715
	report_errata WORKAROUND_CVE_2018_3639, denver, cve_2018_3639

	ldp	x8, x30, [sp], #16
	ret
endfunc denver_errata_report
#endif

	/* ---------------------------------------------
	 * This function provides Denver-specific
	 * register information for crash reporting.
	 * It needs to return with x6 pointing to
	 * a list of register names in ASCII and
	 * x8 - x15 having values of registers to be
	 * reported.
	 * ---------------------------------------------
	 */
.section .rodata.denver_regs, "aS"
denver_regs:  /* The ASCII list of register names to be reported */
	.asciz	"actlr_el1", ""
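	/* The trailing empty string terminates the name list. */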

func denver_cpu_reg_dump
	adr	x6, denver_regs
	mrs	x8, actlr_el1
	ret
endfunc denver_cpu_reg_dump

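	/*
	 * Every Denver variant (PN0 through PN8) registers the same CPU
	 * ops, with the CVE-2017-5715 checker wired in as the extra1
	 * workaround-discovery hook and no extra2 function.
	 */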
declare_cpu_ops_wa denver, DENVER_MIDR_PN0, \
	denver_reset_func, \
	check_errata_cve_2017_5715, \
	CPU_NO_EXTRA2_FUNC, \
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn

declare_cpu_ops_wa denver, DENVER_MIDR_PN1, \
	denver_reset_func, \
	check_errata_cve_2017_5715, \
	CPU_NO_EXTRA2_FUNC, \
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn

declare_cpu_ops_wa denver, DENVER_MIDR_PN2, \
	denver_reset_func, \
	check_errata_cve_2017_5715, \
	CPU_NO_EXTRA2_FUNC, \
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn

declare_cpu_ops_wa denver, DENVER_MIDR_PN3, \
	denver_reset_func, \
	check_errata_cve_2017_5715, \
	CPU_NO_EXTRA2_FUNC, \
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn

declare_cpu_ops_wa denver, DENVER_MIDR_PN4, \
	denver_reset_func, \
	check_errata_cve_2017_5715, \
	CPU_NO_EXTRA2_FUNC, \
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn

declare_cpu_ops_wa denver, DENVER_MIDR_PN5, \
	denver_reset_func, \
	check_errata_cve_2017_5715, \
	CPU_NO_EXTRA2_FUNC, \
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn

declare_cpu_ops_wa denver, DENVER_MIDR_PN6, \
	denver_reset_func, \
	check_errata_cve_2017_5715, \
	CPU_NO_EXTRA2_FUNC, \
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn

declare_cpu_ops_wa denver, DENVER_MIDR_PN7, \
	denver_reset_func, \
	check_errata_cve_2017_5715, \
	CPU_NO_EXTRA2_FUNC, \
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn

declare_cpu_ops_wa denver, DENVER_MIDR_PN8, \
	denver_reset_func, \
	check_errata_cve_2017_5715, \
	CPU_NO_EXTRA2_FUNC, \
	denver_core_pwr_dwn, \
	denver_cluster_pwr_dwn