xref: /rk3399_ARM-atf/lib/cpus/aarch64/cpu_helpers.S (revision 1d2706dbaf98634aa1eecc65e52b54acf330df3d)
/*
 * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/errata_report.h>
#include <lib/el3_runtime/cpu_data.h>

	/* Reset fn is needed in BL at reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) ||	\
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler

#endif

#ifdef IMAGE_BL31 /* The core and cluster power down functions are needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare the CPU for power down; common to all platforms. The
	 * function takes the power domain level to be powered down as its
	 * parameter. After the cpu_ops pointer is retrieved from cpu_data,
	 * the handler for the requested power level is called.
	 */
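	/*
	 * For example, power_level 0 selects the first entry in the cpu_ops
	 * power down handler array (the core-level handler); any level at or
	 * above CPU_MAX_PWR_DWN_OPS is clamped to the last entry.
	 */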
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the last power level
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler (entries are 8-byte pointers) */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	br	x1
endfunc prepare_cpu_pwr_dwn


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * The below function returns a pointer to the cpu_ops structure
	 * matching the MIDR of the core. It reads MIDR_EL1 and finds the
	 * matching entry in the list of cpu_ops entries. Only the
	 * implementation and part number are used to match the entries.
	 *
	 * If cpu_ops for the MIDR_EL1 cannot be found and
	 * SUPPORT_UNKNOWN_MPID is enabled, it will try to look for a
	 * default cpu_ops with an MIDR value of 0.
	 * (Implementation number 0x0 should be reserved for software use
	 * and therefore no clashes should happen with that default value).
	 *
	 * Return :
	 *     x0 - The matching cpu_ops pointer on success
	 *     x0 - 0 on failure.
	 * Clobbers : x0 - x5
	 */
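	/*
	 * (For reference: the fields selected by CPU_IMPL_PN_MASK are the
	 * MIDR_EL1 implementer, bits [31:24], and part number, bits [15:4];
	 * variant and revision are ignored when matching.)
	 */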
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using mask */
	and	w2, w2, w3

	/* Get the cpu_ops end location */
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0
1:
	/* Get the cpu_ops start location */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)

2:
	/* Check if we have reached end of list */
	cmp	x4, x5
	b.eq	search_def_ptr

	/* load the midr from the cpu_ops */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if midr matches to midr of this core */
	cmp	w1, w2
	b.ne	2b

	/* Subtract the increment and offset to get the cpu-ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
#ifdef SUPPORT_UNKNOWN_MPID
	cbnz	x2, exit_mpid_found
	/* Mark the unsupported MPID flag */
	adrp	x1, unsupported_mpid_flag
	add	x1, x1, :lo12:unsupported_mpid_flag
	str	w2, [x1]
exit_mpid_found:
#endif
	ret

	/*
	 * Search again for a default pointer (MIDR = 0x0)
	 * or return error if already searched.
	 */
search_def_ptr:
#ifdef SUPPORT_UNKNOWN_MPID
	cbz	x2, error_exit
	mov	x2, #0
	b	1b
error_exit:
#endif
	ret
endfunc get_cpu_ops_ptr

/*
 * Extract the CPU revision and variant, and combine them into a single numeric
 * value for easier comparison.
 */
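/*
 * For example, an r2p1 core (variant 2, revision 1) is returned as 0x21.
 */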
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, the errata applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
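/*
 * For example, an r0p1 core (x0 = 0x01) checked against a maximum affected
 * revision of r1p2 (x1 = 0x12) returns ERRATA_APPLIES.
 */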
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is higher than or equal to the
 * given value, the errata applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs

/*
 * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for
 * errata application purposes. If the revision-variant falls within the range,
 * including its bounds, the errata applies; otherwise it does not.
 *
 * Shall clobber: x0-x4
 */
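/*
 * For example, with a range of r0p1 to r1p0 (x1 = 0x01, x2 = 0x10), an r0p4
 * core (x0 = 0x04) returns ERRATA_APPLIES, while an r1p1 core (0x11) does not.
 */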
	.globl	cpu_rev_var_range
func cpu_rev_var_range
	mov	x3, #ERRATA_APPLIES
	mov	x4, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x1, x3, x4, hs
	cbz	x1, 1f
	cmp	x0, x2
	csel	x1, x3, x4, ls
1:
	mov	x0, x1
	ret
endfunc cpu_rev_var_range

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print errata status for CPUs of its class. Must be called only:
 *
 *   - with the MMU and data caches enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
	.globl print_errata_status
func print_errata_status
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
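	/* xzr is saved alongside x30 only to keep the stack 16-byte aligned */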
	stp	xzr, x30, [sp, #-16]!
	bl	get_cpu_ops_ptr
	ldp	xzr, x30, [sp], #16
	ldr	x1, [x0, #CPU_ERRATA_FUNC]
	cbnz	x1, .Lprint
#else
	/*
	 * Retrieve the pointer to cpu_ops from per-CPU data and, from it, the
	 * errata printing function. If the function is non-NULL, jump to it.
	 */
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x1, #CPU_ERRATA_FUNC]
	cbz	x0, .Lnoprint

	/*
	 * Printing errata status requires atomically testing the printed flag.
	 */
	stp	x19, x30, [sp, #-16]!
	mov	x19, x0

	/*
	 * Load pointers to errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	x0, [x1, #CPU_ERRATA_LOCK]
	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	mov	x1, x19
	ldp	x19, x30, [sp], #16
	cbnz	x0, .Lprint
#endif
.Lnoprint:
	ret
.Lprint:
	/* Jump to errata reporting function for this CPU */
	br	x1
endfunc print_errata_status
#endif

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA1_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715

/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation. If the core uses static mitigation or is
 * unaffected by CVE-2018-3639 this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr

/*
 * int check_smccc_arch_wa3_applies(void);
 *
 * This function checks whether SMCCC_ARCH_WORKAROUND_3 is enabled to mitigate
 * CVE-2022-23960 for this CPU. It returns:
 *  - ERRATA_APPLIES when SMCCC_ARCH_WORKAROUND_3 can be invoked to mitigate
 *    the CVE.
 *  - ERRATA_NOT_APPLIES when SMCCC_ARCH_WORKAROUND_3 should not be invoked to
 *    mitigate the CVE.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_smccc_arch_wa3_applies
func check_smccc_arch_wa3_applies
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA3_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2022-23960 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA3_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_smccc_arch_wa3_applies