/*
 * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>
#include <lib/el3_runtime/cpu_data.h>

#ifdef IMAGE_BL31 /* Powering down the core and cluster is needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Common CPU power-down preparation for all platforms. The function
	 * takes the power domain level to be powered down as its parameter.
	 * After the cpu_ops pointer is retrieved from cpu_data, the handler
	 * for the requested power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS - 1, call
	 * the power down handler for the highest supported power level.
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	br	x1
endfunc prepare_cpu_pwr_dwn
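
	/*
	 * Roughly equivalent C for the dispatch above (a sketch only: the
	 * pwr_dwn_ops field name is illustrative, mirroring the
	 * CPU_PWR_DWN_OPS offset, and the final br is a tail call):
	 *
	 *	void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *	{
	 *		struct cpu_ops *ops = get_cpu_data(cpu_ops_ptr);
	 *
	 *		if (power_level > (CPU_MAX_PWR_DWN_OPS - 1))
	 *			power_level = CPU_MAX_PWR_DWN_OPS - 1;
	 *		ops->pwr_dwn_ops[power_level]();
	 *	}
	 */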


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
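
	/*
	 * Roughly equivalent C (illustrative only; get_cpu_data and
	 * set_cpu_data are the per-CPU data accessors from cpu_data.h):
	 *
	 *	void init_cpu_ops(void)
	 *	{
	 *		if (get_cpu_data(cpu_ops_ptr) == NULL)
	 *			set_cpu_data(cpu_ops_ptr, get_cpu_ops_ptr());
	 *	}
	 */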
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
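
	/*
	 * The dispatch reduces to the C sketch below (illustrative only:
	 * the field name mirrors the CPU_REG_DUMP offset, and cpu_reg_dump
	 * implementations follow the crash-reporting register conventions
	 * rather than the regular AAPCS, so only the control flow carries
	 * over):
	 *
	 *	struct cpu_ops *ops = get_cpu_ops_ptr();
	 *
	 *	if ((ops != NULL) && (ops->cpu_reg_dump != NULL))
	 *		ops->cpu_reg_dump();
	 */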
#endif

	/*
	 * The function below returns a pointer to the cpu_ops structure
	 * matching the MIDR of the core. It reads MIDR_EL1 and finds the
	 * matching entry in the cpu_ops list; only the implementer and
	 * part number fields are used for the match.
	 *
	 * If no cpu_ops entry for the MIDR_EL1 value can be found and
	 * SUPPORT_UNKNOWN_MPID is enabled, it looks for a default cpu_ops
	 * entry with an MIDR value of 0. (Implementer code 0x0 is reserved
	 * for software use, so no clash with that default value should
	 * occur.)
	 *
	 * Return :
	 *     x0 - The matching cpu_ops pointer on success
	 *     x0 - 0 on failure.
	 * Clobbers : x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using the mask */
	and	w2, w2, w3

	/* Get the cpu_ops end location */
	adr_l	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0
1:
	/* Get the cpu_ops start location */
	adr_l	x4, (__CPU_OPS_START__ + CPU_MIDR)

2:
	/* Check if we have reached the end of the list */
	cmp	x4, x5
	b.eq	search_def_ptr

	/* Load the midr from the cpu_ops */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if the midr matches the midr of this core */
	cmp	w1, w2
	b.ne	2b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
#ifdef SUPPORT_UNKNOWN_MPID
	cbnz	x2, exit_mpid_found
	/* Mark the unsupported MPID flag */
	adrp	x1, unsupported_mpid_flag
	add	x1, x1, :lo12:unsupported_mpid_flag
	str	w2, [x1]
exit_mpid_found:
#endif
	ret

	/*
	 * Search again for a default pointer (MIDR = 0x0)
	 * or return error if already searched.
	 */
search_def_ptr:
#ifdef SUPPORT_UNKNOWN_MPID
	cbz	x2, error_exit
	mov	x2, #0
	b	1b
error_exit:
#endif
#if ENABLE_ASSERTIONS
	/*
	 * Assert if an invalid cpu_ops pointer was obtained. If it is not
	 * valid, the proper CPU file may not have been included in the build.
	 */
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ret
endfunc get_cpu_ops_ptr
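
	/*
	 * Illustrative C equivalent of the search above (a sketch only: the
	 * midr field name mirrors the CPU_MIDR offset, and the linker
	 * symbols bound the cpu_ops records emitted by declare_cpu_ops):
	 *
	 *	struct cpu_ops *get_cpu_ops_ptr(void)
	 *	{
	 *		unsigned int midr = read_midr_el1() & CPU_IMPL_PN_MASK;
	 *		struct cpu_ops *ops;
	 *
	 *		for (ops = (struct cpu_ops *)__CPU_OPS_START__;
	 *		     ops < (struct cpu_ops *)__CPU_OPS_END__; ops++)
	 *			if ((ops->midr & CPU_IMPL_PN_MASK) == midr)
	 *				return ops;
	 *		return NULL;	(with SUPPORT_UNKNOWN_MPID, retry
	 *				 once with midr == 0 first)
	 *	}
	 */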

	/*
	 * unsigned int cpu_get_rev_var(void);
	 * Return the variant and revision fields of MIDR_EL1, packed by
	 * get_rev_var as variant[7:4] and revision[3:0] in x0.
	 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	get_rev_var x0, x1
	ret
endfunc cpu_get_rev_var

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA1_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715
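
	/*
	 * This check, and the identically structured check_wa_cve_2024_7881
	 * and check_smccc_arch_wa3_applies below, reduce to the C sketch
	 * here (illustrative only: extra1 stands in for the reserved
	 * cpu_ops slot addressed by CPU_EXTRA1_FUNC, and the br above is a
	 * tail call into the CPU-specific check):
	 *
	 *	int check_wa_cve_2017_5715(void)
	 *	{
	 *		struct cpu_ops *ops = get_cpu_data(cpu_ops_ptr);
	 *
	 *		if (ops->extra1 == CPU_NO_EXTRA1_FUNC)
	 *			return ERRATA_NOT_APPLIES;
	 *		return ops->extra1();
	 *	}
	 */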

/*
 * int check_wa_cve_2024_7881(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2024_7881
func check_wa_cve_2024_7881
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA4_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2024-7881 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA4_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2024_7881

/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable mitigation
 * for CVE-2018-3639. The function pointer is only returned on cores
 * that employ dynamic mitigation. If the core uses static mitigation
 * or is unaffected by CVE-2018-3639, this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr
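
	/*
	 * Illustrative use by a caller (a sketch only; the void (*)(void)
	 * signature is assumed here for illustration):
	 *
	 *	void (*disable)(void) = wa_cve_2018_3639_get_disable_ptr();
	 *
	 *	if (disable != NULL)
	 *		disable();	(core employs dynamic mitigation)
	 */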

/*
 * int check_smccc_arch_wa3_applies(void);
 *
 * This function checks whether SMCCC_ARCH_WORKAROUND_3 is enabled to mitigate
 * CVE-2022-23960 for this CPU. It returns:
 *  - ERRATA_APPLIES when SMCCC_ARCH_WORKAROUND_3 can be invoked to mitigate
 *    the CVE.
 *  - ERRATA_NOT_APPLIES when SMCCC_ARCH_WORKAROUND_3 should not be invoked to
 *    mitigate the CVE.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_smccc_arch_wa3_applies
func check_smccc_arch_wa3_applies
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA3_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2022-23960 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA3_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_smccc_arch_wa3_applies