xref: /rk3399_ARM-atf/lib/cpus/aarch64/cpu_helpers.S (revision 51faada71a219a8b94cd8d8e423f0f22e9da4d8f)
/*
 * Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#ifdef IMAGE_BL31
#include <cpu_data.h>
#endif
#include <cpu_macros.S>
#include <debug.h>
#include <errata_report.h>

	/* The reset function is needed only in the BL images that execute from the reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked.
	 * Clobbers: x0 - x19, x30
	 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
#if ASM_ASSERTION
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler
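
	/*
	 * For reference, the cpu_ops entries that this handler searches are
	 * registered by the individual CPU library files via the
	 * declare_cpu_ops macro from cpu_macros.S. An illustrative sketch
	 * (the cortex_a72_* symbols live in the corresponding CPU file):
	 *
	 *	declare_cpu_ops cortex_a72, CORTEX_A72_MIDR, \
	 *		cortex_a72_reset_func, \
	 *		cortex_a72_core_pwr_dwn, \
	 *		cortex_a72_cluster_pwr_dwn
	 */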

#endif /* IMAGE_BL1 || IMAGE_BL31 */

#ifdef IMAGE_BL31 /* The CPU and cluster power down functionality is needed only in BL31 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare CPU power down function for all platforms. The function takes
	 * a domain level to be powered down as its parameter. After the cpu_ops
	 * pointer is retrieved from cpu_data, the handler for the requested
	 * power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the highest power level implemented.
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

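	/*
	 * Note: the power down handlers are laid out in cpu_ops as an array
	 * of CPU_MAX_PWR_DWN_OPS 64-bit function pointers starting at offset
	 * CPU_PWR_DWN_OPS, so the handler for level N sits at offset
	 * CPU_PWR_DWN_OPS + (N << 3), which is what the scaled add below
	 * computes.
	 */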
	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
	br	x1
endfunc prepare_cpu_pwr_dwn


	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This can be called without a runtime stack, but may
	 * only be called after the MMU is enabled.
	 * Clobbers: x0 - x6, x10
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
#if ASM_ASSERTION
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
	/*
	 * The cpu specific registers which need to be reported in a crash
	 * are reported via the cpu_ops cpu_reg_dump function. After a matching
	 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
	 * in the cpu_ops is invoked.
	 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

	/*
	 * This function returns the cpu_ops structure matching the
	 * midr of the core. It reads MIDR_EL1 and finds the matching
	 * entry among the cpu_ops entries. Only the implementation and
	 * part number are used to match the entries.
	 * Return:
	 *     x0 - The matching cpu_ops pointer on success
	 *     x0 - 0 on failure
	 * Clobbers: x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)
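
	/*
	 * Note: __CPU_OPS_START__ and __CPU_OPS_END__ are linker-defined
	 * symbols bounding the section into which the cpu_ops structures are
	 * placed. Adding CPU_MIDR up front makes x4 point at the midr field
	 * of the first entry, so the loop below can walk the midr fields
	 * directly, post-incrementing by CPU_OPS_SIZE on each iteration.
	 */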

	/* Initialize the return parameter */
	mov	x0, #0

	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using mask */
	and	w2, w2, w3
1:
	/* Check if we have reached end of list */
	cmp	x4, x5
	b.eq	error_exit

	/* Load the midr from the cpu_ops */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if the midr matches the midr of this core */
	cmp	w1, w2
	b.ne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
error_exit:
	ret
endfunc get_cpu_ops_ptr

/*
 * Extract the CPU revision and variant, and combine them into a single numeric
 * value for easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
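	/*
	 * For example, an r2p1 part (variant 2, revision 1) yields 0x21.
	 */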
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, the erratum applies; otherwise it does not.
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls
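
/*
 * For illustration (a sketch; the erratum revision here is hypothetical): a
 * CPU file checking an erratum that affects revisions up to and including
 * r1p2 would tail-call this helper with the packed revision-variant in x1:
 *
 *	mov	x1, #0x12
 *	b	cpu_rev_var_ls
 */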

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is higher than or equal to the
 * given value, the erratum applies; otherwise it does not.
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs
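
/*
 * Similarly (again a hypothetical sketch), an erratum present from r1p0
 * onwards would be checked with:
 *
 *	mov	x1, #0x10
 *	b	cpu_rev_var_hs
 */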

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print errata status for CPUs of its class. Must be called only:
 *
 *   - with MMU and data caches enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
	.globl print_errata_status
func print_errata_status
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
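	/*
	 * Note: xzr is stored alongside x30 only to keep the stack pointer
	 * 16-byte aligned, as AArch64 requires for SP-based accesses.
	 */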
	stp	xzr, x30, [sp, #-16]!
	bl	get_cpu_ops_ptr
	ldp	xzr, x30, [sp], #16
	ldr	x1, [x0, #CPU_ERRATA_FUNC]
	cbnz	x1, .Lprint
#else
	/*
	 * Retrieve the pointer to cpu_ops from per-CPU data, and further, the
	 * errata printing function. If it's non-NULL, jump to the function.
	 */
	mrs	x0, tpidr_el3
	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
	ldr	x0, [x1, #CPU_ERRATA_FUNC]
	cbz	x0, .Lnoprint

	/*
	 * Printing errata status requires atomically testing the printed flag.
	 */
	stp	x8, x30, [sp, #-16]!
	mov	x8, x0

	/*
	 * Load pointers to the errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	x0, [x1, #CPU_ERRATA_LOCK]
	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	mov	x1, x8
	ldp	x8, x30, [sp], #16
	cbnz	x0, .Lprint
#endif
.Lnoprint:
	ret
.Lprint:
	/* Jump to the errata reporting function for this CPU */
	br	x1
endfunc print_errata_status
#endif
