xref: /rk3399_ARM-atf/lib/cpus/aarch64/cpu_helpers.S (revision 6bb96fa6d6e101ffeef16464f8a44104a112074f)
1/*
2 * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7#include <arch.h>
8#include <asm_macros.S>
9#include <assert_macros.S>
10#include <common/bl_common.h>
11#include <common/debug.h>
12#include <cpu_macros.S>
13#include <lib/cpus/cpu_ops.h>
14#include <lib/cpus/errata.h>
15#include <lib/el3_runtime/cpu_data.h>
16
	/* Reset fn is needed in BL at reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) ||	\
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms.  After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	/* No stack at reset: preserve the return address in a register */
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer (returned in x0) */
	bl	get_cpu_ops_ptr

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f		/* no CPU-specific reset handler registered */

	/*
	 * The cpu_ops reset handler can clobber x0 - x19, x30.
	 * Tail-call it: x30 already holds the original return address,
	 * so the handler returns straight to our caller.
	 */
	br	x2
1:
	ret
endfunc reset_handler

#endif
48
49#ifdef IMAGE_BL31 /* The power down core and cluster is needed only in  BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function takes
	 * a domain level to be powered down as its parameter. After the cpu_ops
	 * pointer is retrieved from cpu_data, the handler for requested power
	 * level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the last power level
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi		/* x2 = min(x0, CPU_MAX_PWR_DWN_OPS - 1) */

	/* cpu_ops pointer lives in per-CPU data, anchored at TPIDR_EL3 */
	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3	/* handlers are an array of 8-byte pointers */
	ldr	x1, [x0, x1]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	/* Tail-call: the handler returns directly to our caller */
	br	x1
endfunc prepare_cpu_pwr_dwn
85
86
87	/*
88	 * Initializes the cpu_ops_ptr if not already initialized
89	 * in cpu_data. This can be called without a runtime stack, but may
90	 * only be called after the MMU is enabled.
91	 * clobbers: x0 - x6, x10
92	 */
93	.globl	init_cpu_ops
94func init_cpu_ops
95	mrs	x6, tpidr_el3
96	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
97	cbnz	x0, 1f
98	mov	x10, x30
99	bl	get_cpu_ops_ptr
100	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
101	mov x30, x10
1021:
103	ret
104endfunc init_cpu_ops
105#endif /* IMAGE_BL31 */
106
#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	/*
	 * Save LR in x16: get_cpu_ops_ptr only clobbers x0 - x5.
	 * NOTE(review): assumes the cpu_reg_dump hook also preserves x16 —
	 * confirm against the per-CPU implementations.
	 */
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f			/* no cpu_ops found: nothing to dump */

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f			/* dump hook is optional; skip if absent */
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif
131
	/*
	 * The below function returns the cpu_ops structure matching the
	 * midr of the core. It reads the MIDR_EL1 and finds the matching
	 * entry in cpu_ops entries. Only the implementation and part number
	 * are used to match the entries.
	 *
	 * If cpu_ops for the MIDR_EL1 cannot be found and
	 * SUPPORT_UNKNOWN_MPID is enabled, it will try to look for a
	 * default cpu_ops with an MIDR value of 0.
	 * (Implementation number 0x0 should be reserved for software use
	 * and therefore no clashes should happen with that default value).
	 *
	 * Return :
	 *     x0 - The matching cpu_ops pointer on Success
	 *     x0 - 0 on failure.
	 * Clobbers : x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using mask */
	and	w2, w2, w3

	/* Get the cpu_ops end location (linker-provided section bound) */
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0
1:
	/* Get the cpu_ops start location (restart point for the retry pass) */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)

2:
	/* Check if we have reached end of list */
	cmp	x4, x5
	b.eq	search_def_ptr

	/* load the midr from the cpu_ops, post-incrementing to the next entry */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if midr matches to midr of this core */
	cmp	w1, w2
	b.ne	2b

	/* Subtract the increment and offset to get the cpu-ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
#ifdef SUPPORT_UNKNOWN_MPID
	/* x2 is only 0 here on the default (MIDR = 0) retry pass */
	cbnz	x2, exit_mpid_found
	/* Mark the unsupported MPID flag (w2 == 0 on this path) */
	adrp	x1, unsupported_mpid_flag
	add	x1, x1, :lo12:unsupported_mpid_flag
	str	w2, [x1]
exit_mpid_found:
#endif
	ret

	/*
	 * Search again for a default pointer (MIDR = 0x0)
	 * or return error if already searched.
	 */
search_def_ptr:
#ifdef SUPPORT_UNKNOWN_MPID
	cbz	x2, error_exit		/* second pass already failed: give up */
	mov	x2, #0			/* rescan, matching the default MIDR 0x0 */
	b	1b
error_exit:
#endif
	ret			/* x0 is still 0 (failure) if no match was found */
endfunc get_cpu_ops_ptr
209
/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 *
 * Returns (in x0): variant in bits [7:4], revision in bits [3:0].
 * Clobbers: x0, x1
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Pull the variant (MIDR[23:20]) and revision (MIDR[3:0]) fields out
	 * of MIDR_EL1 separately, then combine them as
	 * x0 = (variant << 4) | revision.
	 */
	ubfx	x0, x1, #MIDR_VAR_SHIFT, #MIDR_VAR_BITS
	ubfx	x1, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	orr	x0, x1, x0, lsl #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var
229
/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is less than or same as a given
 * value, indicates that errata applies; otherwise not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	/* Assume not affected, then prove otherwise (unsigned compare) */
	mov	x2, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	b.hi	1f			/* rev-var strictly above the limit */
	mov	x2, #ERRATA_APPLIES
1:
	mov	x0, x2
	ret
endfunc cpu_rev_var_ls
245
/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is higher than or same as a
 * given value, indicates that errata applies; otherwise not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	/* Assume not affected, then prove otherwise (unsigned compare) */
	mov	x2, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	b.lo	1f			/* rev-var strictly below the threshold */
	mov	x2, #ERRATA_APPLIES
1:
	mov	x0, x2
	ret
endfunc cpu_rev_var_hs
261
/*
 * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for errata
 * application purposes. If the revision-variant is between or includes the given
 * values, this indicates that errata applies; otherwise not.
 *
 * Shall clobber: x0-x4
 */
	.globl	cpu_rev_var_range
func cpu_rev_var_range
	/* Guard-clause style: bail out as soon as x0 falls outside [x1, x2] */
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	b.lo	1f			/* rev-var below the lower bound */
	cmp	x0, x2
	b.hi	1f			/* rev-var above the upper bound */
	mov	x3, #ERRATA_APPLIES
1:
	mov	x0, x3
	ret
endfunc cpu_rev_var_range
282
#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print errata status for CPUs of its class. Must be called only:
 *
 *   - with MMU and data caches are enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
	.globl print_errata_status
func print_errata_status
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
	stp	xzr, x30, [sp, #-16]!	/* xzr slot only keeps sp 16-byte aligned */
	bl	get_cpu_ops_ptr
	ldp	xzr, x30, [sp], #16
	ldr	x1, [x0, #CPU_ERRATA_FUNC]
	cbnz	x1, .Lprint		/* .Lprint expects the function in x1 */
#else
	/*
	 * Retrieve pointer to cpu_ops from per-CPU data, and further, the
	 * errata printing function. If it's non-NULL, jump to the function in
	 * turn.
	 */
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x1, #CPU_ERRATA_FUNC]
	cbz	x0, .Lnoprint		/* no reporting function for this CPU */

	/*
	 * Printing errata status requires atomically testing the printed flag.
	 */
	stp	x19, x30, [sp, #-16]!
	mov	x19, x0			/* x19 = errata reporting function */

	/*
	 * Load pointers to errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	x0, [x1, #CPU_ERRATA_LOCK]
	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	mov	x1, x19			/* .Lprint expects the function in x1 */
	ldp	x19, x30, [sp], #16
	cbnz	x0, .Lprint		/* nonzero: this CPU should report */
#endif
.Lnoprint:
	ret
.Lprint:
	/* Tail-call the errata reporting function for this CPU */
	br	x1
endfunc print_errata_status
#endif
348
/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3		/* per-CPU data pointer */
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	/* EXTRA1 slot holds the per-CPU CVE-2017-5715 check function */
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA1_FUNC
	beq	1f
	br	x0		/* tail-call: it supplies the ERRATA_* result in x0 */
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715
385
/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation.  If the core uses static mitigation or is
 * unaffected by CVE-2018-3639 this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3		/* per-CPU data pointer */
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	/* EXTRA2 slot holds the dynamic-mitigation disable function (or NULL) */
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr
413
/*
 * int check_smccc_arch_wa3_applies(void);
 *
 * This function checks whether SMCCC_ARCH_WORKAROUND_3 is enabled to mitigate
 * CVE-2022-23960 for this CPU. It returns:
 *  - ERRATA_APPLIES when SMCCC_ARCH_WORKAROUND_3 can be invoked to mitigate
 *    the CVE.
 *  - ERRATA_NOT_APPLIES when SMCCC_ARCH_WORKAROUND_3 should not be invoked to
 *    mitigate the CVE.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_smccc_arch_wa3_applies
func check_smccc_arch_wa3_applies
	mrs	x0, tpidr_el3		/* per-CPU data pointer */
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	/* EXTRA3 slot holds the per-CPU CVE-2022-23960 check function */
	ldr	x0, [x0, #CPU_EXTRA3_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2022-23960 so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA3_FUNC
	beq	1f
	br	x0		/* tail-call: it supplies the ERRATA_* result in x0 */
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_smccc_arch_wa3_applies
451