/*
 * Copyright (c) 2016-2023, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
#include <common/bl_common.h>
#include <lib/el3_runtime/cpu_data.h>

#if defined(IMAGE_BL1) || defined(IMAGE_BL32) || \
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked. The reset handler is invoked very early
	 * in the boot sequence and it is assumed that we can clobber r0 - r10
	 * without the need to follow AAPCS.
	 * Clobbers: r0 - r10
	 */
	.globl	reset_handler
func reset_handler
	mov	r8, lr

	/* The plat_reset_handler can clobber r0 - r7 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
	bl	get_cpu_ops_ptr

#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	r1, [r0, #CPU_RESET_FUNC]
	cmp	r1, #0
	mov	lr, r8
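	/*
	 * With the caller's return address restored into lr, tail-call the
	 * CPU-specific reset handler if one is registered; otherwise fall
	 * through and simply return.
	 */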
	bxne	r1
	bx	lr
endfunc reset_handler

#endif

#ifdef IMAGE_BL32 /* Core and cluster power down is needed only in BL32 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function takes
	 * a domain level to be powered down as its parameter. After the cpu_ops
	 * pointer is retrieved from cpu_data, the handler for the requested
	 * power level is called.
	 */
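	/*
	 * Roughly equivalent C sketch (illustrative only; field names are
	 * paraphrased, not part of the build):
	 *
	 *   if (power_level > CPU_MAX_PWR_DWN_OPS - 1)
	 *           power_level = CPU_MAX_PWR_DWN_OPS - 1;
	 *   _cpu_data()->cpu_ops_ptr->pwr_dwn_ops[power_level]();
	 */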
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds the highest implemented level
	 * (CPU_MAX_PWR_DWN_OPS - 1), call the power down handler for the
	 * last power level instead.
	 */
	mov	r2, #(CPU_MAX_PWR_DWN_OPS - 1)
	cmp	r0, r2
	movhi	r0, r2

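	/*
	 * Preserve the clamped power level across the _cpu_data call; on
	 * return, r0 holds this core's cpu_data pointer and the level is
	 * popped back into r2.
	 */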
	push	{r0, lr}
	bl	_cpu_data
	pop	{r2, lr}

	ldr	r0, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	r1, #CPU_PWR_DWN_OPS
	add	r1, r1, r2, lsl #2
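	/* r1 = CPU_PWR_DWN_OPS + (level * 4): the level's entry in cpu_ops */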
	ldr	r1, [r0, r1]
#if ENABLE_ASSERTIONS
	cmp	r1, #0
	ASM_ASSERT(ne)
#endif
	bx	r1
endfunc prepare_cpu_pwr_dwn

	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This must only be called after the data cache
	 * is enabled. AAPCS is followed.
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	push	{r4 - r6, lr}
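	/* Fetch this core's cpu_data and check whether cpu_ops is already set */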
	bl	_cpu_data
	mov	r6, r0
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
	cmp	r1, #0
	bne	1f
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
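	/* Cache the discovered cpu_ops pointer in this core's cpu_data */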
	str	r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
1:
	pop	{r4 - r6, pc}
endfunc init_cpu_ops

#endif /* IMAGE_BL32 */

	/*
	 * The below function returns the cpu_ops structure matching the
	 * midr of the core. It reads the MIDR and finds the matching
	 * entry in the cpu_ops list. Only the implementer and part number
	 * are used to match the entries.
	 * Return:
	 *     r0 - The matching cpu_ops pointer on Success
	 *     r0 - 0 on failure.
	 * Clobbers: r0 - r5
	 */
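	/*
	 * Roughly equivalent C sketch (illustrative only; names are
	 * paraphrased, not part of the build):
	 *
	 *   midr = read_midr() & CPU_IMPL_PN_MASK;
	 *   for (ops = __CPU_OPS_START__; ops < __CPU_OPS_END__; ops++)
	 *           if ((ops->midr & CPU_IMPL_PN_MASK) == midr)
	 *                   return ops;
	 *   return NULL;
	 */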
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	ldr	r4, =(__CPU_OPS_START__ + CPU_MIDR)
	ldr	r5, =(__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	r0, #0

	/* Read the MIDR */
	ldcopr	r2, MIDR
	ldr	r3, =CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
	and	r2, r2, r3
1:
	/* Check if we have reached end of list */
	cmp	r4, r5
	bhs	error_exit

	/* load the midr from the cpu_ops */
	ldr	r1, [r4], #CPU_OPS_SIZE
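	/* The post-index advances r4 to the next entry's midr field */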
	and	r1, r1, r3

	/* Check if the midr matches the midr of this core */
	cmp	r1, r2
	bne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
error_exit:
	bx	lr
endfunc get_cpu_ops_ptr

/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	ldcopr	r1, MIDR

	/*
	 * Extract the variant[23:20] and revision[3:0] from r1 and pack them
	 * into r0[7:0] as variant[7:4] and revision[3:0]:
	 *
	 * First extract r1[23:16] to r0[7:0] and zero fill the rest. Then
	 * extract r1[3:0] into r0[3:0] retaining other bits.
	 */
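	/*
	 * For example (illustrative): an r1p2 part, i.e. MIDR variant 0x1 and
	 * revision 0x2, yields r0 = 0x12.
	 */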
	ubfx	r0, r1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfi	r0, r1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	bx	lr
endfunc cpu_get_rev_var

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, indicate that the errata applies; otherwise not.
 */
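/*
 * For example (illustrative): for an erratum affecting revisions up to and
 * including r1p2, callers pass 0x12 in r1.
 */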
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	cmp	r0, r1
	movls	r0, #ERRATA_APPLIES
	movhi	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_ls

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is higher than or equal to the
 * given value, indicate that the errata applies; otherwise not.
 */
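/*
 * For example (illustrative): for an erratum present on r2p0 and all later
 * revisions, callers pass 0x20 in r1.
 */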
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	cmp	r0, r1
	movge	r0, #ERRATA_APPLIES
	movlt	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_hs

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print errata status for CPUs of its class. Must be called only:
 *
 *   - with MMU and data caches enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
	.globl print_errata_status
func print_errata_status
	/* r12 is pushed only for the sake of 8-byte stack alignment */
	push	{r4, r5, r12, lr}
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
	bl	get_cpu_ops_ptr
	ldr	r0, [r0, #CPU_ERRATA_FUNC]
	cmp	r0, #0
	blxne	r0
#else
	/*
	 * Retrieve the pointer to cpu_ops and, from it, the errata printing
	 * function. If it is non-NULL, jump to it.
	 */
	bl	_cpu_data
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	r1, #0
	ASM_ASSERT(ne)
#endif
	ldr	r0, [r1, #CPU_ERRATA_FUNC]
	cmp	r0, #0
	beq	1f

	mov	r4, r0

	/*
	 * Load pointers to errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	r0, [r1, #CPU_ERRATA_LOCK]
	ldr	r1, [r1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	cmp	r0, #0
	blxne	r4
1:
#endif
	pop	{r4, r5, r12, pc}
endfunc print_errata_status
#endif