xref: /rk3399_ARM-atf/lib/cpus/aarch32/cpu_helpers.S (revision 9a905a7d86867bab8a5d9befd40a67a6ab9aaea2)
/*
 * Copyright (c) 2016-2023, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
#include <common/bl_common.h>
#include <lib/el3_runtime/cpu_data.h>

#if defined(IMAGE_BL1) || defined(IMAGE_BL32) || \
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked. The reset handler is invoked very early
	 * in the boot sequence and it is assumed that we can clobber r0 - r10
	 * without the need to follow AAPCS.
	 * Clobbers: r0 - r10
	 */
	.globl	reset_handler
func reset_handler
	mov	r8, lr

	/* The plat_reset_handler can clobber r0 - r7 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
	bl	get_cpu_ops_ptr

#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	r1, [r0, #CPU_RESET_FUNC]
	cmp	r1, #0
	mov	lr, r8
	bxne	r1
	bx	lr
endfunc reset_handler

#endif

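	/*
	 * Illustrative sketch (not part of the original file): the reset
	 * handler above dispatches to a per-CPU reset function that a CPU
	 * support library registers through the declare_cpu_ops macro from
	 * cpu_macros.S. The names below are hypothetical:
	 *
	 *	func soc_cpu_reset_func
	 *		...			@ apply reset-time workarounds
	 *		bx	lr
	 *	endfunc soc_cpu_reset_func
	 *
	 *	declare_cpu_ops soc_cpu, SOC_CPU_MIDR, soc_cpu_reset_func, \
	 *		soc_cpu_core_pwr_dwn, soc_cpu_cluster_pwr_dwn
	 */
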
#ifdef IMAGE_BL32 /* CPU and cluster power down is needed only in BL32 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function
	 * takes the power domain level to be powered down as its parameter.
	 * After the cpu_ops pointer is retrieved from cpu_data, the handler
	 * for the requested power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds the highest implemented level
	 * (CPU_MAX_PWR_DWN_OPS - 1), call the power down handler for the
	 * last power level instead.
	 */
	mov	r2, #(CPU_MAX_PWR_DWN_OPS - 1)
	cmp	r0, r2
	movhi	r0, r2

	push	{r0, lr}
	bl	_cpu_data
	pop	{r2, lr}

	ldr	r0, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	r1, #CPU_PWR_DWN_OPS
	add	r1, r1, r2, lsl #2
	ldr	r1, [r0, r1]
#if ENABLE_ASSERTIONS
	cmp	r1, #0
	ASM_ASSERT(ne)
#endif
	bx	r1
endfunc prepare_cpu_pwr_dwn

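	/*
	 * Worked example (a sketch, not from this file): the handler lookup
	 * above computes r1 = CPU_PWR_DWN_OPS + 4 * level, so for a cpu_ops
	 * entry with handlers at levels 0 (core) and 1 (cluster), a caller
	 * would reach the cluster handler with:
	 *
	 *	mov	r0, #1			@ cluster power domain level
	 *	bl	prepare_cpu_pwr_dwn	@ tail-calls the level 1 handler
	 *
	 * A request above CPU_MAX_PWR_DWN_OPS - 1 is clamped and ends up in
	 * the last implemented handler.
	 */
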
	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This must only be called after the data cache
	 * is enabled. AAPCS is followed.
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	push	{r4 - r6, lr}
	bl	_cpu_data
	mov	r6, r0
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
	cmp	r1, #0
	bne	1f
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
	str	r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
1:
	pop	{r4 - r6, pc}
endfunc init_cpu_ops

#endif /* IMAGE_BL32 */

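	/*
	 * Usage sketch (assumed call site, not part of this file): BL32
	 * would typically call init_cpu_ops once per CPU during boot setup,
	 * after enabling the data cache and before any code that reads
	 * CPU_DATA_CPU_OPS_PTR (e.g. prepare_cpu_pwr_dwn):
	 *
	 *	bl	init_cpu_ops		@ caches cpu_ops ptr in cpu_data
	 */
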
	/*
	 * The below function returns the cpu_ops structure matching the
	 * MIDR of the core. It reads the MIDR and finds the matching
	 * entry in the list of cpu_ops entries. Only the implementer and
	 * part number fields are used to match the entries.
	 * Return:
	 *     r0 - The matching cpu_ops pointer on success
	 *     r0 - 0 on failure
	 * Clobbers: r0 - r5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	ldr	r4, =(__CPU_OPS_START__ + CPU_MIDR)
	ldr	r5, =(__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	r0, #0

	/* Read the MIDR */
	ldcopr	r2, MIDR
	ldr	r3, =CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
	and	r2, r2, r3
1:
	/* Check if we have reached the end of the list */
	cmp	r4, r5
	bhs	error_exit

	/* Load the MIDR from the cpu_ops entry */
	ldr	r1, [r4], #CPU_OPS_SIZE
	and	r1, r1, r3

	/* Check if the MIDR matches that of this core */
	cmp	r1, r2
	bne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
error_exit:
	bx	lr
endfunc get_cpu_ops_ptr

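/*
 * Layout sketch (illustrative): declare_cpu_ops places each cpu_ops entry in
 * a dedicated linker section, which the linker script brackets with the
 * __CPU_OPS_START__ and __CPU_OPS_END__ symbols walked above:
 *
 *	__CPU_OPS_START__:
 *		cpu_ops entry 0		@ MIDR word at offset CPU_MIDR
 *		cpu_ops entry 1		@ each entry is CPU_OPS_SIZE bytes
 *		...
 *	__CPU_OPS_END__:
 */
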
/*
 * Extract the CPU revision and variant, and combine them into a single
 * number for easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	ldcopr	r1, MIDR

	/*
	 * Extract the variant[23:20] and revision[3:0] fields from r1 and
	 * pack them into r0[7:0] as variant[7:4] and revision[3:0]:
	 *
	 * First extract r1[23:16] to r0[7:0] and zero fill the rest. Then
	 * extract r1[3:0] into r0[3:0], retaining the other bits.
	 */
	ubfx	r0, r1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfi	r0, r1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	bx	lr
endfunc cpu_get_rev_var

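/*
 * Worked example (illustrative): for a MIDR with variant = 0x2 (bits[23:20])
 * and revision = 0x1 (bits[3:0]), i.e. an r2p1 part, the ubfx pulls
 * r1[23:16] into r0[7:0] and the bfi then overwrites r0[3:0] with the
 * revision, so cpu_get_rev_var returns r0 = 0x21.
 */
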
/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, indicate that the errata applies; otherwise not.
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	cmp	r0, r1
	movls	r0, #ERRATA_APPLIES
	movhi	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_ls

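/*
 * Usage sketch (hypothetical erratum): a CPU library's check function for an
 * erratum fixed in r1p2 would report it as applying to revisions at or below
 * r1p1:
 *
 *	func check_errata_xxxxxx
 *		mov	r1, #0x11	@ max affected: variant 1, revision 1
 *		b	cpu_rev_var_ls	@ r0 already holds the rev_var value
 *	endfunc check_errata_xxxxxx
 */
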
/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is greater than or equal to
 * the given value, indicate that the errata applies; otherwise not.
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	cmp	r0, r1
	movge	r0, #ERRATA_APPLIES
	movlt	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_hs

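/*
 * Usage sketch (hypothetical erratum): the mirror-image helper, for an
 * erratum introduced in r1p0 and present in all later revisions:
 *
 *	func check_errata_yyyyyy
 *		mov	r1, #0x10	@ first affected: variant 1, revision 0
 *		b	cpu_rev_var_hs
 *	endfunc check_errata_yyyyyy
 */
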
#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print errata status for CPUs of its class. Must be called only:
 *
 *   - with the MMU and data caches enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
	.globl print_errata_status
func print_errata_status
	/* r12 is pushed only for the sake of 8-byte stack alignment */
	push	{r4, r5, r12, lr}
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
	bl	get_cpu_ops_ptr
	ldr	r0, [r0, #CPU_ERRATA_FUNC]
	cmp	r0, #0
	blxne	r0
#else
	/*
	 * Retrieve the cpu_ops pointer, and from it the errata printing
	 * function. If it is non-NULL, call it.
	 */
	bl	_cpu_data
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	r1, #0
	ASM_ASSERT(ne)
#endif
	ldr	r0, [r1, #CPU_ERRATA_FUNC]
	cmp	r0, #0
	beq	1f

	mov	r4, r0

	/*
	 * Load pointers to the errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	r0, [r1, #CPU_ERRATA_LOCK]
	ldr	r1, [r1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	cmp	r0, #0
	blxne	r4
1:
#endif
	pop	{r4, r5, r12, pc}
endfunc print_errata_status
#endif

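/*
 * Usage sketch (assumed call site, not part of this file): with
 * REPORT_ERRATA enabled, a BL image would call this once after cpu_ops have
 * been initialized, with the MMU and data caches on:
 *
 *	bl	init_cpu_ops
 *	bl	print_errata_status	@ logs the status of each erratum
 */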