xref: /rk3399_ARM-atf/lib/cpus/aarch32/cpu_helpers.S (revision c948f77136c42a92d0bb660543a3600c36dcf7f1)
/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
#include <lib/el3_runtime/cpu_data.h>

#if defined(IMAGE_BL1) || defined(IMAGE_BL32) || (defined(IMAGE_BL2) && BL2_AT_EL3)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked. The reset handler runs very early in
	 * the boot sequence, so it is assumed that r0 - r10 can be clobbered
	 * without the need to follow the AAPCS.
	 * Clobbers: r0 - r10
	 */
	.globl	reset_handler
func reset_handler
	mov	r10, lr

	/* The plat_reset_handler can clobber r0 - r9 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
	bl	get_cpu_ops_ptr

#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	r1, [r0, #CPU_RESET_FUNC]
	cmp	r1, #0
	mov	lr, r10
	bxne	r1
	bx	lr
endfunc reset_handler
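
	/*
	 * Editor's illustrative sketch (not part of the original file): in C
	 * terms the dispatch above is roughly the following. The struct is a
	 * hypothetical mirror of the cpu_ops layout; the real field offsets
	 * (CPU_MIDR, CPU_RESET_FUNC, ...) are generated in cpu_macros.S.
	 *
	 *	struct cpu_ops {
	 *		unsigned long midr;
	 *		void (*reset_func)(void);
	 *		// ... power down handlers, errata printer, etc.
	 *	};
	 *
	 *	void reset_handler(void)
	 *	{
	 *		plat_reset_handler();
	 *		struct cpu_ops *ops = get_cpu_ops_ptr();
	 *		if (ops->reset_func != NULL)
	 *			ops->reset_func();
	 *	}
	 */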

#endif

#ifdef IMAGE_BL32 /* Core and cluster power down handling is needed only in BL32 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function
	 * takes the power domain level to be powered down as its parameter.
	 * After the cpu_ops pointer is retrieved from cpu_data, the handler
	 * for the requested power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds the highest implemented level
	 * (CPU_MAX_PWR_DWN_OPS - 1), call the power down handler for that
	 * last level instead.
	 */
	mov	r2, #(CPU_MAX_PWR_DWN_OPS - 1)
	cmp	r0, r2
	movhi	r0, r2

	push	{r0, lr}
	bl	_cpu_data
	pop	{r2, lr}

	ldr	r0, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	r1, #CPU_PWR_DWN_OPS
	add	r1, r1, r2, lsl #2
	ldr	r1, [r0, r1]
	bx	r1
endfunc prepare_cpu_pwr_dwn
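
	/*
	 * Editor's illustrative sketch (not part of the build), assuming a
	 * hypothetical C mirror of cpu_ops with a pwr_dwn_ops function
	 * pointer array at offset CPU_PWR_DWN_OPS:
	 *
	 *	void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *	{
	 *		struct cpu_ops *ops = _cpu_data()->cpu_ops_ptr;
	 *
	 *		// Clamp to the deepest implemented handler.
	 *		if (power_level > CPU_MAX_PWR_DWN_OPS - 1)
	 *			power_level = CPU_MAX_PWR_DWN_OPS - 1;
	 *		ops->pwr_dwn_ops[power_level]();
	 *	}
	 *
	 * The "add r1, r1, r2, lsl #2" above is the array indexing step: each
	 * entry is a 4-byte AArch32 function pointer, hence the shift by 2.
	 */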

	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This must only be called after the data cache
	 * is enabled. AAPCS is followed.
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	push	{r4 - r6, lr}
	bl	_cpu_data
	mov	r6, r0
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
	cmp	r1, #0
	bne	1f
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
	str	r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
1:
	pop	{r4 - r6, pc}
endfunc init_cpu_ops
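
	/*
	 * Editor's illustrative sketch (not part of the build): init_cpu_ops
	 * lazily caches the result of get_cpu_ops_ptr() in per-CPU data, so
	 * the MIDR search runs at most once per core:
	 *
	 *	void init_cpu_ops(void)
	 *	{
	 *		cpu_data_t *data = _cpu_data();
	 *
	 *		if (data->cpu_ops_ptr == NULL)
	 *			data->cpu_ops_ptr = get_cpu_ops_ptr();
	 *	}
	 *
	 * The cpu_data_t field name follows cpu_data.h; the rest is a sketch
	 * only, not the C API.
	 */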

#endif /* IMAGE_BL32 */

	/*
	 * The function below returns a pointer to the cpu_ops structure
	 * matching the MIDR of the current core. It reads the MIDR and
	 * searches the cpu_ops entries for a match. Only the implementer
	 * and part number fields are used for the comparison.
	 * Return :
	 *     r0 - The matching cpu_ops pointer on success
	 *     r0 - 0 on failure
	 * Clobbers: r0 - r5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	ldr	r4, =(__CPU_OPS_START__ + CPU_MIDR)
	ldr	r5, =(__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	r0, #0

	/* Read the MIDR */
	ldcopr	r2, MIDR
	ldr	r3, =CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using mask */
	and	r2, r2, r3
1:
	/* Check if we have reached end of list */
	cmp	r4, r5
	bhs	error_exit

	/* Load the MIDR from the cpu_ops entry */
	ldr	r1, [r4], #CPU_OPS_SIZE
	and	r1, r1, r3

	/* Check if the MIDR matches that of this core */
	cmp	r1, r2
	bne	1b

	/* Subtract the increment and offset to get the cpu-ops pointer */
	sub	r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
error_exit:
	bx	lr
endfunc get_cpu_ops_ptr
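
	/*
	 * Editor's illustrative sketch (not part of the build): the loop
	 * above is a linear scan over the cpu_ops records that the linker
	 * places between __CPU_OPS_START__ and __CPU_OPS_END__. read_midr()
	 * is a hypothetical stand-in for the ldcopr access to MIDR:
	 *
	 *	struct cpu_ops *get_cpu_ops_ptr(void)
	 *	{
	 *		unsigned long midr = read_midr() & CPU_IMPL_PN_MASK;
	 *		struct cpu_ops *ops;
	 *
	 *		for (ops = __CPU_OPS_START__; ops < __CPU_OPS_END__; ops++)
	 *			if ((ops->midr & CPU_IMPL_PN_MASK) == midr)
	 *				return ops;
	 *		return NULL;	// r0 == 0 on failure
	 *	}
	 */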

/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	ldcopr	r1, MIDR

	/*
	 * Extract the variant[23:20] and revision[3:0] from r1 and pack them
	 * into r0[7:0] as variant[7:4] and revision[3:0]:
	 *
	 * First extract r1[23:16] to r0[7:0] and zero fill the rest. Then
	 * extract r1[3:0] into r0[3:0] retaining other bits.
	 */
	ubfx	r0, r1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfi	r0, r1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	bx	lr
endfunc cpu_get_rev_var
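
/*
 * Editor's illustrative sketch (not part of the build): the ubfx/bfi pair
 * packs the fields as ((variant << 4) | revision). read_midr() is a
 * hypothetical stand-in for the ldcopr access:
 *
 *	unsigned int cpu_get_rev_var(void)
 *	{
 *		unsigned int midr = read_midr();
 *		unsigned int var = (midr >> 20) & 0xf;	// MIDR[23:20]
 *		unsigned int rev = midr & 0xf;		// MIDR[3:0]
 *
 *		return (var << 4) | rev;
 *	}
 *
 * For example, an r1p2 part (variant 1, revision 2) yields 0x12.
 */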

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is less than or the same as
 * the given value, the result indicates that the errata applies; otherwise
 * it does not.
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	cmp	r0, r1
	movls	r0, #ERRATA_APPLIES
	movhi	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_ls

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is higher than or the same as
 * the given value, the result indicates that the errata applies; otherwise
 * it does not.
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	cmp	r0, r1
	movge	r0, #ERRATA_APPLIES
	movlt	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_hs
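
/*
 * Editor's illustrative sketch (not part of the build) of both comparators,
 * using the packed ((variant << 4) | revision) value from cpu_get_rev_var():
 *
 *	int cpu_rev_var_ls(unsigned int rev_var, unsigned int cutoff)
 *	{
 *		return (rev_var <= cutoff) ? ERRATA_APPLIES : ERRATA_NOT_APPLIES;
 *	}
 *
 *	int cpu_rev_var_hs(unsigned int rev_var, unsigned int cutoff)
 *	{
 *		return (rev_var >= cutoff) ? ERRATA_APPLIES : ERRATA_NOT_APPLIES;
 *	}
 *
 * For example, an erratum fixed in r1p3 would be checked with
 * cpu_rev_var_ls(rev_var, 0x12): it applies to r1p2 (0x12) and earlier.
 * Signed (ge/lt) and unsigned (ls/hi) condition codes are interchangeable
 * here because the packed values fit in eight bits.
 */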

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print errata status for CPUs of its class. Must be called only:
 *
 *   - with the MMU and data caches enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
	.globl print_errata_status
func print_errata_status
	/* r12 is pushed only for the sake of 8-byte stack alignment */
	push	{r4, r5, r12, lr}
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
	bl	get_cpu_ops_ptr
	ldr	r0, [r0, #CPU_ERRATA_FUNC]
	cmp	r0, #0
	blxne	r0
#else
	/*
	 * Retrieve the cpu_ops pointer, and from it the errata printing
	 * function. If it is non-NULL, call it.
	 */
	bl	_cpu_data
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
	ldr	r0, [r1, #CPU_ERRATA_FUNC]
	cmp	r0, #0
	beq	1f

	mov	r4, r0

	/*
	 * Load pointers to errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	r0, [r1, #CPU_ERRATA_LOCK]
	ldr	r1, [r1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	cmp	r0, #0
	blxne	r4
1:
#endif
	pop	{r4, r5, r12, pc}
endfunc print_errata_status
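
/*
 * Editor's illustrative sketch (not part of the build) of the non-BL1 path
 * above. errata_needs_reporting() takes the per-class lock and test-and-sets
 * the printed flag, so each CPU class reports its errata status only once.
 * The struct field names are hypothetical mirrors of the CPU_ERRATA_*
 * offsets:
 *
 *	void print_errata_status(void)
 *	{
 *		struct cpu_ops *ops = _cpu_data()->cpu_ops_ptr;
 *
 *		if (ops->errata_func == NULL)
 *			return;
 *		if (errata_needs_reporting(ops->errata_lock, ops->errata_printed))
 *			ops->errata_func();
 *	}
 */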
#endif