/*
 * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/cpus/errata.h>
#include <lib/el3_runtime/cpu_data.h>

	/* The reset function is needed in any BL image that runs at the reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) ||	\
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler

#endif
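
	/*
	 * Illustrative sketch (not part of this file): a CPU library file
	 * plugs its handlers into the lookup performed above by registering
	 * them with the declare_cpu_ops macro from cpu_macros.S. The
	 * Cortex-A53 names below are examples of the pattern only:
	 *
	 *   declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
	 *	cortex_a53_reset_func, \
	 *	cortex_a53_core_pwr_dwn, \
	 *	cortex_a53_cluster_pwr_dwn
	 *
	 * get_cpu_ops_ptr finds the resulting cpu_ops entry by MIDR, and
	 * reset_handler then tail-calls the registered reset function.
	 */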

#ifdef IMAGE_BL31 /* The CPU and cluster power down functions are needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function takes
	 * a domain level to be powered down as its parameter. After the cpu_ops
	 * pointer is retrieved from cpu_data, the handler for the requested
	 * power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the last power level.
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	br	x1
endfunc prepare_cpu_pwr_dwn
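
	/*
	 * Worked example for the indexing above (illustrative): the power
	 * down handlers sit in an array of 8-byte entries at offset
	 * CPU_PWR_DWN_OPS inside cpu_ops, so a request for power level 1
	 * loads its handler from:
	 *
	 *   cpu_ops_ptr + CPU_PWR_DWN_OPS + (1 << 3)
	 *
	 * i.e. the cluster power down handler, registered as the second
	 * power-down argument of declare_cpu_ops.
	 */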


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * Clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
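
	/*
	 * Usage sketch (illustrative; the calling context is assumed, not
	 * defined here): per-CPU initialization code runs this once after
	 * the MMU is on, e.g.
	 *
	 *	bl	init_cpu_ops
	 *
	 * after which CPU_DATA_CPU_OPS_PTR in this core's cpu_data is valid
	 * and the fast tpidr_el3-based lookups elsewhere in this file work.
	 */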
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The CPU-specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif
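
	/*
	 * Sketch of the cpu_reg_dump contract (illustrative; the Cortex-A53
	 * names follow the usual CPU library pattern and are not defined
	 * here): the hook returns a pointer to an ASCII list of register
	 * names in x6 and the corresponding values starting at x8:
	 *
	 *   func cortex_a53_cpu_reg_dump
	 *	adr	x6, cortex_a53_regs
	 *	mrs	x8, CORTEX_A53_ECTLR_EL1
	 *	ret
	 *   endfunc cortex_a53_cpu_reg_dump
	 */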

	/*
	 * This function returns the cpu_ops structure matching the
	 * MIDR of the core. It reads MIDR_EL1 and finds the matching
	 * entry in the cpu_ops list. Only the implementation and part number
	 * are used to match the entries.
	 *
	 * If cpu_ops for the MIDR_EL1 cannot be found and
	 * SUPPORT_UNKNOWN_MPID is enabled, it will try to look for a
	 * default cpu_ops with an MIDR value of 0.
	 * (Implementation number 0x0 should be reserved for software use
	 * and therefore no clashes should happen with that default value).
	 *
	 * Return:
	 *     x0 - The matching cpu_ops pointer on success
	 *     x0 - 0 on failure.
	 * Clobbers: x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using mask */
	and	w2, w2, w3

	/* Get the cpu_ops end location */
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0
1:
	/* Get the cpu_ops start location */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)

2:
	/* Check if we have reached the end of the list */
	cmp	x4, x5
	b.eq	search_def_ptr

	/* Load the midr from the cpu_ops */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if the midr matches the midr of this core */
	cmp	w1, w2
	b.ne	2b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
#ifdef SUPPORT_UNKNOWN_MPID
	cbnz	x2, exit_mpid_found
	/* Mark the unsupported MPID flag */
	adrp	x1, unsupported_mpid_flag
	add	x1, x1, :lo12:unsupported_mpid_flag
	str	w2, [x1]
exit_mpid_found:
#endif
	ret

	/*
	 * Search again for a default pointer (MIDR = 0x0)
	 * or return error if already searched.
	 */
search_def_ptr:
#ifdef SUPPORT_UNKNOWN_MPID
	cbz	x2, error_exit
	mov	x2, #0
	b	1b
error_exit:
#endif
	ret
endfunc get_cpu_ops_ptr
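
/*
 * Worked example of the matching above (illustrative values): a
 * Cortex-A72 r0p2 reads MIDR_EL1 = 0x410FD082. Assuming the usual
 * CPU_IMPL_PN_MASK value of 0xFF00FFF0, the mask keeps the implementer
 * (bits [31:24] = 0x41, Arm) and the part number (bits [15:4] = 0xD08)
 * and drops the variant, architecture and revision fields:
 *
 *   0x410FD082 & 0xFF00FFF0 = 0x4100D080
 *
 * so every A72 revision matches the single CPU_MIDR value that its
 * declare_cpu_ops entry registers.
 */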

/*
 * Extract CPU revision and variant, and combine them into a single numeric
 * value for easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var
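
/*
 * Worked example (illustrative): for an r1p2 part, MIDR_EL1 holds
 * variant = 0x1 in bits [23:20] and revision = 0x2 in bits [3:0], so
 * cpu_get_rev_var returns x0 = 0x12. This packed form is what the
 * comparison helpers below expect in x0.
 */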

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is lower than or equal to the
 * given value, the erratum applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls
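
/*
 * Usage sketch (illustrative; the erratum number and revision bound are
 * assumed, not taken from a real CPU file): an errata check typically
 * receives the packed revision-variant from cpu_get_rev_var in x0 and
 * tail-calls this helper with the last affected revision-variant in x1.
 * For an erratum that applies up to r1p2:
 *
 *   func check_errata_999999
 *	mov	x1, #0x12
 *	b	cpu_rev_var_ls
 *   endfunc check_errata_999999
 */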

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is higher than or equal to the
 * given value, the erratum applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs

/*
 * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for
 * errata application purposes. If the revision-variant falls within the given
 * range (bounds inclusive), the erratum applies; otherwise it does not.
 *
 * Shall clobber: x0-x4
 */
	.globl	cpu_rev_var_range
func cpu_rev_var_range
	mov	x3, #ERRATA_APPLIES
	mov	x4, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x1, x3, x4, hs
	cbz	x1, 1f
	cmp	x0, x2
	csel	x1, x3, x4, ls
1:
	mov	x0, x1
	ret
endfunc cpu_rev_var_range
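
/*
 * Usage sketch (illustrative values): for an erratum affecting r0p0
 * through r1p1 inclusive, a check would load both bounds and tail-call
 * this helper with x0 already holding the cpu_get_rev_var result:
 *
 *	mov	x1, #0x00
 *	mov	x2, #0x11
 *	b	cpu_rev_var_range
 */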

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715, so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA1_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715
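
/*
 * Sketch of a registered EXTRA1 hook (illustrative; real CPU files
 * install such a function through the declare_cpu_ops_wa family of
 * macros, and the function name here is assumed):
 *
 *   func check_errata_cve_2017_5715
 *   #if WORKAROUND_CVE_2017_5715
 *	mov	x0, #ERRATA_APPLIES
 *   #else
 *	mov	x0, #ERRATA_MISSING
 *   #endif
 *	ret
 *   endfunc check_errata_cve_2017_5715
 */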

/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation. If the core uses static mitigation or is
 * unaffected by CVE-2018-3639, this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr
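
/*
 * Caller sketch (illustrative; the label is arbitrary): users of the
 * returned pointer invoke it only when it is non-NULL, e.g. to disable
 * the dynamic mitigation temporarily:
 *
 *	bl	wa_cve_2018_3639_get_disable_ptr
 *	cbz	x0, 1f
 *	blr	x0
 * 1:
 */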

/*
 * int check_smccc_arch_wa3_applies(void);
 *
 * This function checks whether SMCCC_ARCH_WORKAROUND_3 is enabled to mitigate
 * CVE-2022-23960 for this CPU. It returns:
 *  - ERRATA_APPLIES when SMCCC_ARCH_WORKAROUND_3 can be invoked to mitigate
 *    the CVE.
 *  - ERRATA_NOT_APPLIES when SMCCC_ARCH_WORKAROUND_3 should not be invoked to
 *    mitigate the CVE.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_smccc_arch_wa3_applies
func check_smccc_arch_wa3_applies
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA3_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2022-23960, so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA3_FUNC
	beq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_smccc_arch_wa3_applies
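
/*
 * Sketch of a registered EXTRA3 hook (illustrative; the function name is
 * assumed): a CPU affected by CVE-2022-23960 typically reports whether
 * the workaround was compiled in:
 *
 *   func check_smccc_arch_wa3
 *   #if WORKAROUND_CVE_2022_23960
 *	mov	x0, #ERRATA_APPLIES
 *   #else
 *	mov	x0, #ERRATA_NOT_APPLIES
 *   #endif
 *	ret
 *   endfunc check_smccc_arch_wa3
 */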