/*
 * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/errata_report.h>
#include <lib/el3_runtime/cpu_data.h>

	/* The reset function is needed in any BL image that runs at the reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) ||	\
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler

#endif

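/*
 * For reference, a minimal sketch of the platform hook invoked above. The
 * default plat_reset_handler is an empty weak stub (in TF-A it lives in
 * plat/common/aarch64/platform_helpers.S); platforms may override it for
 * early, CPU-independent setup, observing the x0 - x18, x30 clobber rule
 * and the absence of a runtime stack at this point:
 *
 *	func plat_reset_handler
 *		// platform-specific early setup would go here
 *		ret
 *	endfunc plat_reset_handler
 */
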
#ifdef IMAGE_BL31 /* The core and cluster power down functions are needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function takes
	 * a domain level to be powered down as its parameter. After the cpu_ops
	 * pointer is retrieved from cpu_data, the handler for the requested power
	 * level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the last power level
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	br	x1
endfunc prepare_cpu_pwr_dwn
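
/*
 * For reference, the handlers indexed above are 8-byte pointers laid out
 * consecutively from CPU_PWR_DWN_OPS (hence the "lsl #3"). They are
 * populated by the declare_cpu_ops macro in cpu_macros.S; a sketch with
 * illustrative (not real) CPU names:
 *
 *	declare_cpu_ops my_cpu, MY_CPU_MIDR, \
 *		my_cpu_reset_func, \
 *		my_cpu_core_pwr_dwn, \
 *		my_cpu_cluster_pwr_dwn
 *
 * Here my_cpu_core_pwr_dwn serves power level 0 and
 * my_cpu_cluster_pwr_dwn serves level 1; higher levels are capped to the
 * last handler by the code above.
 */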


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * Clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
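
/*
 * Typical call-site sketch: each image that uses per-CPU data calls this
 * once per CPU during early boot (for example from BL31's setup path),
 * after tpidr_el3 points at this core's cpu_data entry and the MMU is on:
 *
 *	bl	init_cpu_ops
 *
 * Later lookups (e.g. prepare_cpu_pwr_dwn above) then use the cached
 * CPU_DATA_CPU_OPS_PTR instead of re-walking the cpu_ops list.
 */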
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The CPU-specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * The function below returns the cpu_ops structure matching the
	 * MIDR of the core. It reads MIDR_EL1 and finds the matching
	 * entry in the cpu_ops entries. Only the implementer and part number
	 * are used to match the entries.
	 *
	 * If cpu_ops for the MIDR_EL1 value cannot be found and
	 * SUPPORT_UNKNOWN_MPID is enabled, it will look for a default
	 * cpu_ops entry with an MIDR value of 0.
	 * (Implementer number 0x0 is reserved for software use, so no
	 * clash with that default value should occur.)
	 *
	 * Return :
	 *     x0 - The matching cpu_ops pointer on success
	 *     x0 - 0 on failure
	 * Clobbers : x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
	and	w2, w2, w3

	/* Get the cpu_ops end location */
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0
1:
	/* Get the cpu_ops start location */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)

2:
	/* Check if we have reached the end of the list */
	cmp	x4, x5
	b.eq	search_def_ptr

	/* Load the midr from the cpu_ops */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if the midr matches the midr of this core */
	cmp	w1, w2
	b.ne	2b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
#ifdef SUPPORT_UNKNOWN_MPID
	cbnz	x2, exit_mpid_found
	/* Mark the unsupported MPID flag */
	adrp	x1, unsupported_mpid_flag
	add	x1, x1, :lo12:unsupported_mpid_flag
	str	w2, [x1]
exit_mpid_found:
#endif
	ret

	/*
	 * Search again for a default pointer (MIDR = 0x0)
	 * or return an error if we have already searched.
	 */
search_def_ptr:
#ifdef SUPPORT_UNKNOWN_MPID
	cbz	x2, error_exit
	mov	x2, #0
	b	1b
error_exit:
#endif
	ret
endfunc get_cpu_ops_ptr
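
/*
 * Worked example of the match above: a Cortex-A72 r0p2 reads MIDR_EL1 =
 * 0x410FD082 (implementer 0x41, variant 0x0, architecture 0xF, part
 * number 0xD08, revision 0x2). CPU_IMPL_PN_MASK keeps only the
 * implementer and part number fields, so the value compared against each
 * entry's CPU_MIDR is 0x4100D080, regardless of variant and revision.
 */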

/*
 * Extract the CPU revision and variant, and combine them into a single
 * numeric value for easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var
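
/*
 * Worked example: on an r2p1 core, MIDR_EL1 has variant (bits [23:20]) =
 * 0x2 and revision (bits [3:0]) = 0x1, so this function returns
 * x0 = 0x21. Packing both fields into one value lets checks such as
 * "r2p1 and earlier" below be a single unsigned compare.
 */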

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, the erratum applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls
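
/*
 * Usage sketch (the erratum number is hypothetical): CPU libraries
 * implement per-erratum checks by loading the cut-off revision-variant
 * and tail-calling this helper. With x0 set by cpu_get_rev_var, an
 * erratum that applies up to and including r1p0 becomes:
 *
 *	func check_errata_999999
 *		mov	x1, #0x10	// r1p0: variant 1, revision 0
 *		b	cpu_rev_var_ls
 *	endfunc check_errata_999999
 */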

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is greater than or equal to
 * the given value, the erratum applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs

/*
 * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for
 * errata application purposes. If the revision-variant falls within the given
 * range (inclusive of both bounds), the erratum applies; otherwise it does not.
 *
 * Shall clobber: x0-x4
 */
	.globl	cpu_rev_var_range
func cpu_rev_var_range
	mov	x3, #ERRATA_APPLIES
	mov	x4, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x1, x3, x4, hs
	cbz	x1, 1f
	cmp	x0, x2
	csel	x1, x3, x4, ls
1:
	mov	x0, x1
	ret
endfunc cpu_rev_var_range
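
/*
 * Usage sketch, mirroring the cpu_rev_var_ls example above (hypothetical
 * erratum number): an erratum affecting only r1p0 through r2p0 inclusive
 * would be checked as:
 *
 *	func check_errata_888888
 *		mov	x1, #0x10	// lower bound: r1p0
 *		mov	x2, #0x20	// upper bound: r2p0
 *		b	cpu_rev_var_range
 *	endfunc check_errata_888888
 */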

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print errata status for CPUs of its class. Must be called only:
 *
 *   - with the MMU and data caches enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
	.globl print_errata_status
func print_errata_status
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
	stp	xzr, x30, [sp, #-16]!
	bl	get_cpu_ops_ptr
	ldp	xzr, x30, [sp], #16
	ldr	x1, [x0, #CPU_ERRATA_FUNC]
	cbnz	x1, .Lprint
#else
	/*
	 * Retrieve pointer to cpu_ops from per-CPU data, and further, the
	 * errata printing function. If it's non-NULL, jump to the function in
	 * turn.
	 */
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x1, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x1, #CPU_ERRATA_FUNC]
	cbz	x0, .Lnoprint

	/*
	 * Printing errata status requires atomically testing the printed flag.
	 */
	stp	x19, x30, [sp, #-16]!
	mov	x19, x0

	/*
	 * Load pointers to the errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	x0, [x1, #CPU_ERRATA_LOCK]
	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	mov	x1, x19
	ldp	x19, x30, [sp], #16
	cbnz	x0, .Lprint
#endif
.Lnoprint:
	ret
.Lprint:
	/* Jump to errata reporting function for this CPU */
	br	x1
endfunc print_errata_status
#endif

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715, so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA1_FUNC
	b.eq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715
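
/*
 * The CPU_EXTRA1_FUNC slot read above is filled in by the CPU library:
 * cores affected by CVE-2017-5715 pass their check function to the
 * declare_cpu_ops_wa variant of the registration macro (see cpu_macros.S
 * for the exact argument order), roughly:
 *
 *	declare_cpu_ops_wa my_cpu, MY_CPU_MIDR, \
 *		my_cpu_reset_func, \
 *		check_errata_cve_2017_5715, \
 *		...
 *
 * Unaffected cores leave the slot as CPU_NO_EXTRA1_FUNC, which is what
 * the NULL check above relies on.
 */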

/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation. If the core uses static mitigation or is
 * unaffected by CVE-2018-3639, this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr

/*
 * int check_smccc_arch_wa3_applies(void);
 *
 * This function checks whether SMCCC_ARCH_WORKAROUND_3 is enabled to mitigate
 * CVE-2022-23960 for this CPU. It returns:
 *  - ERRATA_APPLIES when SMCCC_ARCH_WORKAROUND_3 can be invoked to mitigate
 *    the CVE.
 *  - ERRATA_NOT_APPLIES when SMCCC_ARCH_WORKAROUND_3 should not be invoked to
 *    mitigate the CVE.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_smccc_arch_wa3_applies
func check_smccc_arch_wa3_applies
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_EXTRA3_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2022-23960, so bail out.
	 */
	cmp	x0, #CPU_NO_EXTRA3_FUNC
	b.eq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_smccc_arch_wa3_applies