/*
 * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
#include <common/bl_common.h>
#include <lib/cpus/cpu_ops.h>
#include <lib/el3_runtime/cpu_data.h>

#if defined(IMAGE_BL1) || defined(IMAGE_BL32) || \
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked. The reset handler is invoked very early
	 * in the boot sequence and it is assumed that we can clobber r0 - r10
	 * without the need to follow AAPCS.
	 * Clobbers: r0 - r10
	 */
	.globl	reset_handler
func reset_handler
	mov	r8, lr

	/* The plat_reset_handler can clobber r0 - r7 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
	bl	get_cpu_ops_ptr

#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	r1, [r0, #CPU_RESET_FUNC]
	cmp	r1, #0
	mov	lr, r8
	bxne	r1
	bx	lr
endfunc reset_handler
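
/*
 * Roughly equivalent C-level sketch of the flow above (illustrative only;
 * the ops structure and its reset_func field are assumptions standing in
 * for whatever lies behind the CPU_RESET_FUNC offset). The handler runs
 * before any stack is available, which is why it is written in assembly
 * and keeps the return address in r8:
 *
 *	plat_reset_handler();
 *	ops = get_cpu_ops_ptr();
 *	if (ops->reset_func != NULL)
 *		ops->reset_func();
 */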

#endif

#ifdef IMAGE_BL32
	/*
	 * Initializes the cpu_ops_ptr in cpu_data if it has not been
	 * initialized already. This must only be called after the data
	 * cache is enabled. AAPCS is followed.
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	push	{r4 - r6, lr}
	bl	_cpu_data
	mov	r6, r0
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
	cmp	r1, #0
	bne	1f
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
	str	r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
1:
	pop	{r4 - r6, pc}
endfunc init_cpu_ops
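
/*
 * Illustrative C-level view of the caching above (a sketch, not the
 * project's C API; "cpu_ops_ptr" stands for the member behind the
 * CPU_DATA_CPU_OPS_PTR offset):
 *
 *	data = _cpu_data();
 *	if (data->cpu_ops_ptr == NULL)
 *		data->cpu_ops_ptr = get_cpu_ops_ptr();
 */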

#endif /* IMAGE_BL32 */

	/*
	 * The below function returns the cpu_ops structure matching the
	 * MIDR of the core. It reads the MIDR and finds the matching
	 * entry in the list of cpu_ops entries. Only the implementation
	 * and part number are used to match the entries.
	 * Return:
	 *     r0 - The matching cpu_ops pointer on success
	 *     r0 - 0 on failure
	 * Clobbers: r0 - r5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	ldr	r4, =(__CPU_OPS_START__ + CPU_MIDR)
	ldr	r5, =(__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	r0, #0

	/* Read the MIDR */
	ldcopr	r2, MIDR
	ldr	r3, =CPU_IMPL_PN_MASK

	/* Retain only the implementation and part number using mask */
	and	r2, r2, r3
1:
	/* Check if we have reached end of list */
	cmp	r4, r5
	bhs	error_exit

	/* Load the midr from the cpu_ops entry */
	ldr	r1, [r4], #CPU_OPS_SIZE
	and	r1, r1, r3

	/* Check if the midr matches the midr of this core */
	cmp	r1, r2
	bne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
error_exit:
	bx	lr
endfunc get_cpu_ops_ptr
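
/*
 * The lookup above is a linear scan of the cpu_ops entries that the linker
 * groups between __CPU_OPS_START__ and __CPU_OPS_END__. A rough C-level
 * sketch of the same algorithm (illustrative only; "entry->midr" stands for
 * the field behind the CPU_MIDR offset):
 *
 *	midr = read_midr() & CPU_IMPL_PN_MASK;
 *	for (entry = __CPU_OPS_START__; entry < __CPU_OPS_END__; entry++) {
 *		if ((entry->midr & CPU_IMPL_PN_MASK) == midr)
 *			return entry;
 *	}
 *	return NULL;
 */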

/*
 * Extract the CPU revision and variant, and combine them into a single number
 * for easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	ldcopr	r1, MIDR

	/*
	 * Extract the variant[23:20] and revision[3:0] from r1 and pack them
	 * into r0[7:0] as variant[7:4] and revision[3:0]:
	 *
	 * First extract r1[23:16] to r0[7:0] and zero fill the rest. Then
	 * extract r1[3:0] into r0[3:0] retaining other bits.
	 */
	ubfx	r0, r1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfi	r0, r1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	bx	lr
endfunc cpu_get_rev_var
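
/*
 * Worked example (values illustrative): for MIDR = 0x410fd034 the variant
 * field MIDR[23:20] is 0x0 and the revision field MIDR[3:0] is 0x4, so this
 * function returns r0 = 0x04, i.e. "r0p4" packed as (variant << 4) | revision.
 */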

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is lower than or the same as
 * the given value, ERRATA_APPLIES is returned; otherwise ERRATA_NOT_APPLIES.
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	cmp	r0, r1
	movls	r0, #ERRATA_APPLIES
	movhi	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_ls
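
/*
 * Example (illustrative): a core at r1p0 has a packed revision-variant of
 * 0x10, so cpu_rev_var_ls(0x10, 0x12) yields ERRATA_APPLIES, while a core at
 * r2p0 (0x20) yields ERRATA_NOT_APPLIES for the same 0x12 cut-off.
 */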

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is higher than or the same as
 * the given value, ERRATA_APPLIES is returned; otherwise ERRATA_NOT_APPLIES.
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	cmp	r0, r1
	movge	r0, #ERRATA_APPLIES
	movlt	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_hs
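
/*
 * Example (illustrative): for an erratum present from r1p1 onwards, a check
 * may pass 0x11 in r1; a core at r1p2 (0x12) then yields ERRATA_APPLIES and
 * a core at r1p0 (0x10) yields ERRATA_NOT_APPLIES.
 */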