xref: /rk3399_ARM-atf/lib/cpus/aarch64/cpu_helpers.S (revision ebd6efae67c6a086bc97d807a638bde324d936dc)
/*
 * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/errata_report.h>
#include <lib/el3_runtime/cpu_data.h>

 /* Reset fn is needed in BL at reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
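	/*
	 * x30 is restored here so that the cpu_ops reset handler, reached
	 * via the tail call below, returns directly to reset_handler's
	 * caller.
	 */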
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler

#endif

#ifdef IMAGE_BL31 /* CPU and cluster power down functions are needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function takes
	 * a domain level to be powered down as its parameter. After the cpu_ops
	 * pointer is retrieved from cpu_data, the handler for the requested
	 * power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds the highest implemented level
	 * (CPU_MAX_PWR_DWN_OPS - 1), call the power down handler for the
	 * highest implemented level instead.
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
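	/*
	 * Each entry in the cpu_ops power down handler array is 8 bytes
	 * (a 64-bit function pointer), hence the shift by 3. x1 now holds
	 * the byte offset of the handler for the selected power level.
	 */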
	ldr	x1, [x0, x1]
	br	x1
endfunc prepare_cpu_pwr_dwn


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
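	/* If the cpu_ops pointer is already set, there is nothing more to do */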
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The CPU-specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * The below function returns a pointer to the cpu_ops structure
	 * matching the MIDR of the core. It reads MIDR_EL1 and finds the
	 * matching entry in the list of cpu_ops entries. Only the
	 * implementation and part number are used to match the entries.
	 * Return :
	 *     x0 - The matching cpu_ops pointer on success
	 *     x0 - 0 on failure.
	 * Clobbers : x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)
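	/*
	 * The cpu_ops descriptors sit contiguously between __CPU_OPS_START__
	 * and __CPU_OPS_END__, so x4 and x5 now point to the midr field of
	 * the first entry and of the slot one past the last entry.
	 */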

	/* Initialize the return parameter */
	mov	x0, #0

	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using the mask */
	and	w2, w2, w3
1:
	/* Check if we have reached the end of the list */
	cmp	x4, x5
	b.eq	error_exit

	/* Load the midr from the cpu_ops entry */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if the midr matches the midr of this core */
	cmp	w1, w2
	b.ne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
error_exit:
	ret
endfunc get_cpu_ops_ptr
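/*
 * For illustration only, the walk above is roughly equivalent to the C
 * sketch below. The struct and variable names here are schematic
 * assumptions, not the project's actual C API; the real entry layout is
 * generated by the cpu_ops declaration macros in cpu_macros.S.
 *
 *	struct cpu_ops_desc {
 *		unsigned long midr;	// at offset CPU_MIDR
 *		// reset, power down, errata and crash-dump handlers follow,
 *		// padding the entry out to CPU_OPS_SIZE bytes
 *	};
 *
 *	extern struct cpu_ops_desc __CPU_OPS_START__[], __CPU_OPS_END__[];
 *
 *	struct cpu_ops_desc *get_cpu_ops_ptr(void)
 *	{
 *		unsigned long my_midr = read_midr_el1() & CPU_IMPL_PN_MASK;
 *		struct cpu_ops_desc *d;
 *
 *		for (d = __CPU_OPS_START__; d < __CPU_OPS_END__; d++)
 *			if ((d->midr & CPU_IMPL_PN_MASK) == my_midr)
 *				return d;
 *
 *		return NULL;	// no entry matches this core
 *	}
 */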

/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
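	/*
	 * For example, an r2p1 core (MIDR variant = 0x2, revision = 0x1)
	 * returns 0x21 in x0.
	 */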
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, ERRATA_APPLIES is returned; otherwise ERRATA_NOT_APPLIES.
 *
 * Shall clobber: x0-x3
 */
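/*
 * For example, if x1 is 0x30, ERRATA_APPLIES is returned for any
 * revision-variant up to and including r3p0.
 */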
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is greater than or equal to
 * the given value, ERRATA_APPLIES is returned; otherwise ERRATA_NOT_APPLIES.
 *
 * Shall clobber: x0-x3
 */
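/*
 * For example, if x1 is 0x11, ERRATA_APPLIES is returned for r1p1 and any
 * later revision-variant.
 */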
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs

/*
 * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for
 * errata application purposes. If the revision-variant falls within the range,
 * including its end points, ERRATA_APPLIES is returned; otherwise
 * ERRATA_NOT_APPLIES.
 *
 * Shall clobber: x0-x4
 */
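/*
 * For example, with x1 = 0x10 and x2 = 0x21, ERRATA_APPLIES is returned for
 * revisions r1p0 through r2p1 inclusive.
 */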
	.globl	cpu_rev_var_range
func cpu_rev_var_range
	mov	x3, #ERRATA_APPLIES
	mov	x4, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x1, x3, x4, hs
	cbz	x1, 1f
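	/*
	 * Note that the early exit above relies on ERRATA_NOT_APPLIES being 0:
	 * if the revision-variant is already below the start of the range,
	 * there is no need to check it against the end of the range.
	 */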
	cmp	x0, x2
	csel	x1, x3, x4, ls
1:
	mov	x0, x1
	ret
endfunc cpu_rev_var_range

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print errata status for CPUs of its class. Must be called only:
 *
 *   - with the MMU and data caches enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
	.globl print_errata_status
func print_errata_status
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
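	/* xzr is stored alongside x30 only to keep the stack 16-byte aligned */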
	stp	xzr, x30, [sp, #-16]!
	bl	get_cpu_ops_ptr
	ldp	xzr, x30, [sp], #16
	ldr	x1, [x0, #CPU_ERRATA_FUNC]
	cbnz	x1, .Lprint
#else
	/*
	 * Retrieve the pointer to cpu_ops from per-CPU data and, from that,
	 * the errata printing function. If it is non-NULL, jump to it.
	 */
	mrs	x0, tpidr_el3
	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
	ldr	x0, [x1, #CPU_ERRATA_FUNC]
	cbz	x0, .Lnoprint

	/*
	 * Printing errata status requires atomically testing the printed flag.
	 */
	stp	x19, x30, [sp, #-16]!
	mov	x19, x0

	/*
	 * Load pointers to the errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	x0, [x1, #CPU_ERRATA_LOCK]
	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	mov	x1, x19
	ldp	x19, x30, [sp], #16
	cbnz	x0, .Lprint
#endif
.Lnoprint:
	ret
.Lprint:
	/* Jump to the errata reporting function for this CPU */
	br	x1
endfunc print_errata_status
#endif

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
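/*
 * Affected CPU files install a CVE-2017-5715 check function in the extra1
 * slot of their cpu_ops descriptor (typically via the declare_cpu_ops_wa
 * macro in cpu_macros.S); that is the pointer fetched from CPU_EXTRA1_FUNC
 * below.
 */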
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715 so bail out.
	 */
	cmp	x0, #0
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715

/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable the mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation. If the core uses static mitigation or is
 * unaffected by CVE-2018-3639, this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr