xref: /rk3399_ARM-atf/lib/cpus/aarch64/cpu_helpers.S (revision 36eeb59f9eb0c2e966bd41b02c0dc588faffce35)
/*
 * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>
#include <lib/el3_runtime/cpu_data.h>

	/* Reset fn is needed in BL at reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) ||	\
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr

#if ENABLE_ASSERTIONS
	/*
	 * Assert if an invalid cpu_ops pointer was obtained. If it is not
	 * valid, it may suggest that the proper CPU file hasn't been included.
	 */
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler
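
	/*
	 * Illustrative sketch (exact names and arguments differ per CPU): the
	 * cpu_ops entries that reset_handler and get_cpu_ops_ptr search are
	 * emitted by the individual CPU library files using the
	 * declare_cpu_ops macro from cpu_macros.S, roughly like:
	 *
	 *	declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
	 *		cortex_a53_reset_func, \
	 *		cortex_a53_core_pwr_dwn, \
	 *		cortex_a53_cluster_pwr_dwn
	 *
	 * reset_handler then dispatches to the reset_func of whichever entry
	 * matches this core's MIDR.
	 */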

#endif

#ifdef IMAGE_BL31 /* The core and cluster power down functions are needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * CPU power down preparation common to all platforms. The function
	 * takes the power domain level to be powered down as its parameter.
	 * After the cpu_ops pointer is retrieved from cpu_data, the handler
	 * for the requested power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds the highest implemented level
	 * (CPU_MAX_PWR_DWN_OPS - 1), call the power down handler for the
	 * last power level.
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	br	x1
endfunc prepare_cpu_pwr_dwn
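
	/*
	 * Usage sketch (illustrative, assumed caller): the PSCI power down
	 * path typically invokes this with the deepest power level being
	 * turned off, e.g.
	 *
	 *	mov	x0, #0		// level 0: core-only power down
	 *	bl	prepare_cpu_pwr_dwn
	 *
	 * Levels above (CPU_MAX_PWR_DWN_OPS - 1) are clamped to the last
	 * implemented handler, so passing a larger level is safe.
	 */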


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * Clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
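
	/*
	 * Usage sketch (illustrative): this is meant to be called once per
	 * CPU during boot, after tpidr_el3 points at this CPU's cpu_data and
	 * the MMU is on, e.g.
	 *
	 *	bl	init_cpu_ops
	 *
	 * Later lookups (prepare_cpu_pwr_dwn, the CVE helpers below) then
	 * read the cached CPU_DATA_CPU_OPS_PTR instead of re-scanning the
	 * cpu_ops list.
	 */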
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The CPU-specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * The function below returns the cpu_ops structure matching the
	 * MIDR of the core. It reads MIDR_EL1 and finds the matching
	 * entry among the cpu_ops entries. Only the implementer and part
	 * number are used to match the entries.
	 *
	 * If cpu_ops for the MIDR_EL1 cannot be found and
	 * SUPPORT_UNKNOWN_MPID is enabled, it looks for a
	 * default cpu_ops with an MIDR value of 0.
	 * (Implementer code 0x0 is reserved for software use, so no
	 * clashes with real parts should happen with that default value.)
	 *
	 * Return:
	 *     x0 - The matching cpu_ops pointer on success
	 *     x0 - 0 on failure.
	 * Clobbers: x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
	and	w2, w2, w3

	/* Get the cpu_ops end location */
	adr_l	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0
1:
	/* Get the cpu_ops start location */
	adr_l	x4, (__CPU_OPS_START__ + CPU_MIDR)

2:
	/* Check if we have reached the end of the list */
	cmp	x4, x5
	b.eq	search_def_ptr

	/* Load the MIDR from the cpu_ops entry */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if the MIDR matches the MIDR of this core */
	cmp	w1, w2
	b.ne	2b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
#ifdef SUPPORT_UNKNOWN_MPID
	cbnz	x2, exit_mpid_found
	/* Mark the unsupported MPID flag */
	adrp	x1, unsupported_mpid_flag
	add	x1, x1, :lo12:unsupported_mpid_flag
	str	w2, [x1]
exit_mpid_found:
#endif
	ret

	/*
	 * Search again for a default pointer (MIDR = 0x0)
	 * or return error if already searched.
	 */
search_def_ptr:
#ifdef SUPPORT_UNKNOWN_MPID
	cbz	x2, error_exit
	mov	x2, #0
	b	1b
error_exit:
#endif
	ret
endfunc get_cpu_ops_ptr
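
	/*
	 * Worked note on the search above: x4 walks the list pointing at the
	 * CPU_MIDR field of each entry, and the ldr post-increments it by
	 * CPU_OPS_SIZE. When a match is found, x4 therefore already points
	 * at the CPU_MIDR field of the *next* entry, so subtracting
	 * (CPU_OPS_SIZE + CPU_MIDR) recovers the base address of the
	 * matching cpu_ops structure.
	 */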

	.globl	cpu_get_rev_var
func cpu_get_rev_var
	get_rev_var x0, x1
	ret
endfunc cpu_get_rev_var
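
/*
 * Note (illustrative; see the get_rev_var macro in cpu_macros.S for the
 * authoritative encoding): the returned value is expected to pack the MIDR
 * Variant field in bits [7:4] and the Revision field in bits [3:0], so an
 * r1p2 core would yield 0x12. The errata checks in the CPU files compare
 * this rev_var value against the revisions an erratum applies to.
 */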

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA1_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715
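
/*
 * Usage sketch (illustrative, assumed caller): the return value is intended
 * for deciding what to report for SMCCC_ARCH_WORKAROUND_1 discovery, e.g. an
 * assembly caller could do (wa1_required being a hypothetical label):
 *
 *	bl	check_wa_cve_2017_5715
 *	cmp	x0, #ERRATA_APPLIES
 *	b.eq	wa1_required
 *
 * ERRATA_APPLIES / ERRATA_NOT_APPLIES / ERRATA_MISSING come from
 * lib/cpus/errata.h, which is already included above.
 */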

/*
 * int check_wa_cve_2024_7881(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2024_7881
func check_wa_cve_2024_7881
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA4_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2024-7881 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA4_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2024_7881

/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation. If the core uses static mitigation or is
 * unaffected by CVE-2018-3639, this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr
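
/*
 * Usage sketch (illustrative, assumed caller): a caller that wants to turn
 * the dynamic CVE-2018-3639 mitigation off can test the returned pointer and
 * branch to it when non-NULL, e.g. (LR save/restore omitted for brevity):
 *
 *	bl	wa_cve_2018_3639_get_disable_ptr
 *	cbz	x0, 1f		// NULL: static mitigation or unaffected
 *	blr	x0
 * 1:
 *
 * On cores with static-only mitigation, or which are unaffected, nothing
 * is called.
 */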

/*
 * int check_smccc_arch_wa3_applies(void);
 *
 * This function checks whether SMCCC_ARCH_WORKAROUND_3 is enabled to mitigate
 * CVE-2022-23960 for this CPU. It returns:
 *  - ERRATA_APPLIES when SMCCC_ARCH_WORKAROUND_3 can be invoked to mitigate
 *    the CVE.
 *  - ERRATA_NOT_APPLIES when SMCCC_ARCH_WORKAROUND_3 should not be invoked to
 *    mitigate the CVE.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_smccc_arch_wa3_applies
func check_smccc_arch_wa3_applies
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA3_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2022-23960 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA3_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_smccc_arch_wa3_applies