xref: /rk3399_ARM-atf/lib/cpus/aarch32/cpu_helpers.S (revision fd7b287cbe9147ca9e07dd9f30c49c58bbdd92a8)
/*
 * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
#include <common/bl_common.h>
#include <lib/el3_runtime/cpu_data.h>

#if defined(IMAGE_BL1) || defined(IMAGE_BL32) || (defined(IMAGE_BL2) && BL2_AT_EL3)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked. The reset handler is invoked very early
	 * in the boot sequence and it is assumed that we can clobber r0 - r10
	 * without the need to follow AAPCS.
	 * Clobbers: r0 - r10
	 */
	.globl	reset_handler
func reset_handler
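	/*
	 * Save lr in r8: the stack may not be initialised this early in the
	 * boot sequence, and both callees below may clobber lr.
	 */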
	mov	r8, lr

	/* The plat_reset_handler can clobber r0 - r7 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
	bl	get_cpu_ops_ptr

#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	r1, [r0, #CPU_RESET_FUNC]
	cmp	r1, #0
	mov	lr, r8
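	/*
	 * Tail-call the CPU-specific reset handler if one is registered; lr
	 * has been restored, so it returns directly to our caller.
	 */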
	bxne	r1
	bx	lr
endfunc reset_handler

#endif

#ifdef IMAGE_BL32 /* The core and cluster power down functions are needed only in BL32 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function takes
	 * a domain level to be powered down as its parameter. After the cpu_ops
	 * pointer is retrieved from cpu_data, the handler for the requested
	 * power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds the highest implemented power
	 * level (CPU_MAX_PWR_DWN_OPS - 1), call the power down handler for
	 * the highest level instead.
	 */
	mov	r2, #(CPU_MAX_PWR_DWN_OPS - 1)
	cmp	r0, r2
	movhi	r0, r2

	push	{r0, lr}
	bl	_cpu_data
	pop	{r2, lr}
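	/* r0 = per-CPU data pointer from _cpu_data, r2 = clamped power level */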

	ldr	r0, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
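	/* The handlers are an array of 32-bit pointers, so scale the level by 4 */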
	mov	r1, #CPU_PWR_DWN_OPS
	add	r1, r1, r2, lsl #2
	ldr	r1, [r0, r1]
	bx	r1
endfunc prepare_cpu_pwr_dwn

	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This must only be called after the data cache
	 * is enabled. AAPCS is followed.
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	push	{r4 - r6, lr}
	bl	_cpu_data
	mov	r6, r0
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
	cmp	r1, #0
	bne	1f
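	/* No cached cpu_ops pointer yet; look it up and store it below */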
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
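	/*
	 * Cache the pointer for subsequent calls. The writeback ('!') also
	 * bumps r6, but r6 is restored by the pop below, so it is harmless.
	 */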
	str	r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
1:
	pop	{r4 - r6, pc}
endfunc init_cpu_ops

#endif /* IMAGE_BL32 */

	/*
	 * The below function returns the cpu_ops structure matching the
	 * MIDR of the core. It reads the MIDR and finds the matching
	 * entry in the cpu_ops list. Only the implementer and part number
	 * fields are used to match the entries.
	 * Return:
	 *     r0 - The matching cpu_ops pointer on success
	 *     r0 - 0 on failure
	 * Clobbers: r0 - r5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	ldr	r4, =(__CPU_OPS_START__ + CPU_MIDR)
	ldr	r5, =(__CPU_OPS_END__ + CPU_MIDR)
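	/*
	 * __CPU_OPS_START__ and __CPU_OPS_END__ are linker-defined symbols
	 * bounding the cpu_ops section; adding CPU_MIDR makes r4 point at
	 * the midr field of each entry while iterating.
	 */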

	/* Initialize the return parameter */
	mov	r0, #0

	/* Read the MIDR */
	ldcopr	r2, MIDR
	ldr	r3, =CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
	and	r2, r2, r3
1:
	/* Check if we have reached the end of the list */
	cmp	r4, r5
	bhs	error_exit

	/* Load the midr from the cpu_ops entry; post-increment r4 to the next entry */
	ldr	r1, [r4], #CPU_OPS_SIZE
	and	r1, r1, r3

	/* Check if the midr matches the midr of this core */
	cmp	r1, r2
	bne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
error_exit:
	bx	lr
endfunc get_cpu_ops_ptr

/*
 * Extract CPU revision and variant, and combine them into a single numeric
 * value for easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	ldcopr	r1, MIDR

	/*
	 * Extract the variant[23:20] and revision[3:0] fields of r1 and pack
	 * them into r0[7:0] as variant[7:4] and revision[3:0]:
	 *
	 * First extract r1[23:16] to r0[7:0] and zero fill the rest. Then
	 * extract r1[3:0] into r0[3:0], retaining the other bits.
	 */
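	/* For example, an r1p2 part (variant 1, revision 2) yields r0 = 0x12 */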
	ubfx	r0, r1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfi	r0, r1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	bx	lr
endfunc cpu_get_rev_var

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is less than or the same as
 * the given value, indicate that the errata applies; otherwise not.
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	cmp	r0, r1
	movls	r0, #ERRATA_APPLIES
	movhi	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_ls
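
/*
 * Sketch of typical use (the values here are hypothetical, not from this
 * file): a CPU errata check receives the packed revision-variant in r0 and
 * does, e.g.:
 *
 *	ldr	r1, =0x12	@ last affected revision-variant, e.g. r1p2
 *	b	cpu_rev_var_ls	@ returns ERRATA_APPLIES iff rev-var <= r1p2
 */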

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is higher than or the same as
 * the given value, indicate that the errata applies; otherwise not.
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	cmp	r0, r1
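	/*
	 * Signed ge/lt behave the same as unsigned hs/lo here, since packed
	 * revision-variant values, as produced by cpu_get_rev_var, fit in
	 * 8 bits.
	 */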
	movge	r0, #ERRATA_APPLIES
	movlt	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_hs

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print errata status for CPUs of its class. Must be called only:
 *
 *   - with the MMU and data caches enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
	.globl	print_errata_status
func print_errata_status
	/* r12 is pushed only for the sake of 8-byte stack alignment */
	push	{r4, r5, r12, lr}
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data, so retrieve the CPU operations
	 * directly.
	 */
	bl	get_cpu_ops_ptr
	ldr	r0, [r0, #CPU_ERRATA_FUNC]
	cmp	r0, #0
	blxne	r0
#else
	/*
	 * Retrieve the cpu_ops pointer, and from it the errata printing
	 * function. If it is non-NULL, call it.
	 */
	bl	_cpu_data
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
	ldr	r0, [r1, #CPU_ERRATA_FUNC]
	cmp	r0, #0
	beq	1f

	mov	r4, r0

	/*
	 * Load pointers to the errata lock and the printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	r0, [r1, #CPU_ERRATA_LOCK]
	ldr	r1, [r1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
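	/* r0 is non-zero if this CPU should print the report for its class */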
	cmp	r0, #0
	blxne	r4
1:
#endif
	pop	{r4, r5, r12, pc}
endfunc print_errata_status
#endif