xref: /rk3399_ARM-atf/lib/cpus/aarch32/cpu_helpers.S (revision 71f7a363fe9d5aa6466ffd4b663cf52d9033deaa)
1/*
2 * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7#include <arch.h>
8#include <asm_macros.S>
9#include <assert_macros.S>
10#include <cpu_macros.S>
11#include <common/bl_common.h>
12#include <lib/cpus/cpu_ops.h>
13#include <lib/el3_runtime/cpu_data.h>
14
	/*
	 * The below macro returns the cpu_ops structure matching the
	 * MIDR of the executing core. It reads the MIDR and finds the
	 * matching entry in the cpu_ops list placed by the linker between
	 * __CPU_OPS_START__ and __CPU_OPS_END__. Only the implementer and
	 * part number fields (CPU_IMPL_PN_MASK) are used for the match.
	 * Return :
	 *     r0 - The matching cpu_ops pointer on Success
	 *     r0 - 0 on failure.
	 * Clobbers: r0 - r5
	 */
.macro get_cpu_ops_ptr_impl
	/*
	 * Get the cpu_ops start and end locations, pre-biased by CPU_MIDR so
	 * that r4 points directly at the midr field of the first entry and the
	 * post-indexed load below steps from midr field to midr field.
	 */
	ldr	r4, =(__CPU_OPS_START__ + CPU_MIDR)
	ldr	r5, =(__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter (0 = not found) */
	mov	r0, #0

	/* Read the MIDR (AArch32 coprocessor register, via ldcopr) */
	ldcopr	r2, MIDR
	ldr	r3, =CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using mask */
	and	r2, r2, r3
1:
	/* Check if we have reached end of list (unsigned address compare) */
	cmp	r4, r5
	bhs	error_exit\@

	/* Load the midr of this entry; post-index r4 to the next entry */
	ldr	r1, [r4], #CPU_OPS_SIZE
	and	r1, r1, r3

	/* Check if midr matches to midr of this core */
	cmp	r1, r2
	bne	1b

	/*
	 * r4 has already advanced past the matching entry; subtract the
	 * increment and the CPU_MIDR bias to recover the cpu_ops base pointer.
	 */
	sub	r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
/* \@ expands to the macro invocation count, keeping this label unique per expansion */
error_exit\@:
.endm
60
#if defined(IMAGE_BL1) || defined(IMAGE_BL32) || \
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms.  After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked. The reset handler is invoked very early
	 * in the boot sequence and it is assumed that we can clobber r0 - r10
	 * without the need to follow AAPCS.
	 * Clobbers: r0 - r10
	 */
	.globl	reset_handler
func reset_handler
	/* Stash lr in r8: the callees below are only allowed to clobber r0-r7 */
	mov	r8, lr

	/* The plat_reset_handler can clobber r0 - r7 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
	get_cpu_ops_ptr_impl

#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	r1, [r0, #CPU_RESET_FUNC]
	cmp	r1, #0
	/* Restore the original return address before dispatching */
	mov	lr, r8
	/* Tail-call the CPU-specific reset handler if one is provided... */
	bxne	r1
	/* ...otherwise return straight to our caller */
	bx	lr
endfunc reset_handler

#endif
95
	.globl	get_cpu_ops_ptr
/*
 * Performs an AAPCS-compliant call to get_cpu_ops_ptr_impl: the macro
 * clobbers r0 - r5, so the callee-saved r4 - r5 are preserved around it.
 * Return:
 *     r0 - The matching cpu_ops pointer on success, 0 on failure.
 */
func get_cpu_ops_ptr
	push	{r4 - r5, lr}
	get_cpu_ops_ptr_impl
	pop	{r4 - r5, pc}
endfunc get_cpu_ops_ptr
103
#ifdef IMAGE_BL32
	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This must only be called after the data cache
	 * is enabled. AAPCS is followed.
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	push	{r4 - r6, lr}
	bl	_cpu_data
	/* Keep the cpu_data pointer in callee-saved r6 across the call below */
	mov	r6, r0
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
	cmp	r1, #0
	/* Already initialized - skip the lookup */
	bne	1f
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
	/*
	 * NOTE(review): the '!' writeback modifies r6, which is harmless only
	 * because r6 is restored by the pop below; a plain store (no '!')
	 * would express the intent more clearly - confirm before changing.
	 */
	str	r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
1:
	pop	{r4 - r6, pc}
endfunc init_cpu_ops

#endif /* IMAGE_BL32 */
129
/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 * Return:
 *     r0 - packed rev-var: variant in bits [7:4], revision in bits [3:0];
 *          all higher bits zero.
 * Clobbers: r1
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	ldcopr	r1, MIDR

	/*
	 * Extract the variant[23:20] and revision[3:0] from r1 and pack it in
	 * r0[0:7] as variant[7:4] and revision[3:0]:
	 *
	 * First extract r1[23:16] to r0[7:0] and zero fill the rest. Then
	 * extract r1[3:0] into r0[3:0] retaining other bits.
	 */
	/* ubfx grabs variant (plus the adjacent architecture nibble, which bfi overwrites below) */
	ubfx	r0, r1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	/* bfi inserts revision into the low nibble, leaving the variant bits intact */
	bfi	r0, r1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	bx	lr
endfunc cpu_get_rev_var
149
/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is less than or same as a given
 * value, indicates that errata applies; otherwise not.
 * Return:
 *     r0 - ERRATA_APPLIES if r0 <= r1 (unsigned), ERRATA_NOT_APPLIES otherwise.
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	/* Unsigned compare: rev-var is a small packed unsigned value */
	cmp	r0, r1
	/* The two conditions are complementary; exactly one move executes */
	movhi	r0, #ERRATA_NOT_APPLIES
	movls	r0, #ERRATA_APPLIES
	bx	lr
endfunc cpu_rev_var_ls
162
/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is higher than or same as a
 * given value, indicates that errata applies; otherwise not.
 * Return:
 *     r0 - ERRATA_APPLIES if r0 >= r1 (unsigned), ERRATA_NOT_APPLIES otherwise.
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	/*
	 * Use the unsigned hs/lo conditions to match the function's
	 * "higher or same" contract and the unsigned compare used by
	 * cpu_rev_var_ls. The previous signed ge/lt forms only worked
	 * because packed rev-var values fit in the low byte; unsigned is
	 * the correct comparison for an unsigned packed quantity.
	 */
	cmp	r0, r1
	movhs	r0, #ERRATA_APPLIES
	movlo	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_hs
175