/*
 * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/errata_report.h>
#include <lib/el3_runtime/cpu_data.h>

	/* Reset fn is needed in BL at reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
	/*
	 * The reset handler common to all platforms.  After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
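	/*
	 * Restore the return address; the cpu_ops reset handler, tail-called
	 * below, returns directly to our caller.
	 */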
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler

#endif

#ifdef IMAGE_BL31 /* CPU and cluster power down is needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * The CPU power down preparation function common to all platforms. It
	 * takes the power domain level to be powered down as its parameter.
	 * After the cpu_ops pointer is retrieved from cpu_data, the handler
	 * for the requested power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds the maximum supported
	 * (CPU_MAX_PWR_DWN_OPS - 1), call the power down handler for the
	 * highest supported level instead.
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

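	/* tpidr_el3 holds the pointer to this core's cpu_data */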
	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
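	/* Tail-call the handler; it returns directly to our caller */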
	br	x1
endfunc prepare_cpu_pwr_dwn


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * Clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The CPU-specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * This function returns the cpu_ops structure matching the MIDR of
	 * the core. It reads MIDR_EL1 and finds the matching entry in the
	 * list of cpu_ops entries. Only the implementation and part number
	 * are used to match the entries.
	 * Return:
	 *     x0 - The matching cpu_ops pointer on success
	 *     x0 - 0 on failure
	 * Clobbers: x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)
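	/*
	 * x4 and x5 point at the midr field of the first entry and one past
	 * the last entry; the post-indexed load below steps x4 forward by
	 * CPU_OPS_SIZE to the next entry's midr field.
	 */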

	/* Initialize the return parameter */
	mov	x0, #0

	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using mask */
	and	w2, w2, w3
1:
	/* Check if we have reached end of list */
	cmp	x4, x5
	b.eq	error_exit

	/* load the midr from the cpu_ops */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if the midr matches the midr of this core */
	cmp	w1, w2
	b.ne	1b

	/* Subtract the increment and offset to get the cpu-ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
error_exit:
	ret
endfunc get_cpu_ops_ptr

/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, this indicates that the errata applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is higher than or equal to the
 * given value, this indicates that the errata applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print errata status for CPUs of its class. Must be called only:
 *
 *   - with the MMU and data caches enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
	.globl print_errata_status
func print_errata_status
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
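	/* xzr is stored only to keep the stack pointer 16-byte aligned */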
	stp	xzr, x30, [sp, #-16]!
	bl	get_cpu_ops_ptr
	ldp	xzr, x30, [sp], #16
	ldr	x1, [x0, #CPU_ERRATA_FUNC]
	cbnz	x1, .Lprint
#else
	/*
	 * Retrieve pointer to cpu_ops from per-CPU data, and further, the
	 * errata printing function. If it's non-NULL, jump to the function in
	 * turn.
	 */
	mrs	x0, tpidr_el3
	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
	ldr	x0, [x1, #CPU_ERRATA_FUNC]
	cbz	x0, .Lnoprint

	/*
	 * Printing errata status requires atomically testing the printed flag.
	 */
	stp	x19, x30, [sp, #-16]!
	mov	x19, x0

	/*
	 * Load pointers to errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	x0, [x1, #CPU_ERRATA_LOCK]
	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	mov	x1, x19
	ldp	x19, x30, [sp], #16
	cbnz	x0, .Lprint
#endif
.Lnoprint:
	ret
.Lprint:
	/* Jump to errata reporting function for this CPU */
	br	x1
endfunc print_errata_status
#endif

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715 so bail out.
	 */
	cmp	x0, #0
	beq	1f
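	/* Tail-call the CPU-specific handler; it returns the ERRATA_* status */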
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715

/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation.  If the core uses static mitigation or is
 * unaffected by CVE-2018-3639, this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr