/*
 * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
#include <common/bl_common.h>
#include <lib/cpus/cpu_ops.h>
#include <lib/el3_runtime/cpu_data.h>

	/*
	 * The below macro returns the cpu_ops structure matching the
	 * MIDR of the core. It reads the MIDR and finds the matching
	 * entry in the cpu_ops entries. Only the implementer and part
	 * number fields are used to match the entries.
	 * Return:
	 *     r0 - The matching cpu_ops pointer on success
	 *     r0 - 0 on failure
	 * Clobbers: r0 - r5
	 */
.macro get_cpu_ops_ptr_impl
	/* Get the cpu_ops start and end locations */
	ldr	r4, =(__CPU_OPS_START__ + CPU_MIDR)
	ldr	r5, =(__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	r0, #0

	/* Read the MIDR */
	ldcopr	r2, MIDR
	ldr	r3, =CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
	and	r2, r2, r3
1:
	/* Check if we have reached the end of the list */
	cmp	r4, r5
	bhs	error_exit\@

	/*
	 * Load the MIDR from the cpu_ops entry; the post-indexed load also
	 * advances r4 to the next entry
	 */
	ldr	r1, [r4], #CPU_OPS_SIZE
	and	r1, r1, r3

	/* Check if the MIDR matches the MIDR of this core */
	cmp	r1, r2
	bne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
error_exit\@:
.endm

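	/*
	 * Worked example (illustrative values): a Cortex-A53 r0p4 core
	 * reads MIDR = 0x410fd034. Assuming CPU_IMPL_PN_MASK covers the
	 * implementer and part number fields (0xff00fff0), the masked
	 * value is 0x4100d030, so every variant/revision of the same CPU
	 * matches the single cpu_ops entry declared for it.
	 */
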
#if defined(IMAGE_BL1) || defined(IMAGE_BL32) || \
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked. The reset handler is invoked very early
	 * in the boot sequence and it is assumed that we can clobber r0 - r10
	 * without the need to follow AAPCS.
	 * Clobbers: r0 - r10
	 */
	.globl	reset_handler
func reset_handler
	mov	r8, lr

	/* The plat_reset_handler can clobber r0 - r7 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
	get_cpu_ops_ptr_impl

#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	r1, [r0, #CPU_RESET_FUNC]
	cmp	r1, #0
	mov	lr, r8
	bxne	r1
	bx	lr
endfunc reset_handler

#endif

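/*
 * For reference, CPU files populate the cpu_ops entries consumed above
 * through the declare_cpu_ops macro; a sketch of such an invocation
 * (argument list assumed, see the individual CPU files for the real one):
 *
 *	declare_cpu_ops cortex_a53, CORTEX_A53_MIDR, \
 *		cortex_a53_reset_func, \
 *		cortex_a53_core_pwr_dwn, \
 *		cortex_a53_cluster_pwr_dwn
 *
 * This is what fills the CPU_MIDR and CPU_RESET_FUNC slots read by the
 * macro and the reset handler.
 */
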
	.globl	get_cpu_ops_ptr
/* Performs an AAPCS-compliant call to get_cpu_ops_ptr_impl */
func get_cpu_ops_ptr
	push	{r4 - r5, lr}
	get_cpu_ops_ptr_impl
	pop	{r4 - r5, pc}
endfunc get_cpu_ops_ptr
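
	/*
	 * Since the wrapper is AAPCS-compliant it can also be called from
	 * C; a hypothetical prototype (shown for illustration only):
	 *
	 *	struct cpu_ops *get_cpu_ops_ptr(void);
	 *
	 * with a NULL (0) return when no cpu_ops entry matches this core.
	 */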

#ifdef IMAGE_BL32
	/*
	 * Initializes the cpu_ops_ptr in cpu_data, if it is not already
	 * initialized. This must only be called after the data cache is
	 * enabled. AAPCS is followed.
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	push	{r4 - r6, lr}
	bl	_cpu_data
	mov	r6, r0
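	/* Skip the lookup if cpu_ops_ptr is already cached in cpu_data */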
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
	cmp	r1, #0
	bne	1f
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
	str	r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
1:
	pop	{r4 - r6, pc}
endfunc init_cpu_ops

#endif /* IMAGE_BL32 */

/*
 * Extract the CPU revision and variant, and combine them into a single
 * numeric value for easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	ldcopr	r1, MIDR

	/*
	 * Extract the variant[23:20] and revision[3:0] from r1 and pack them
	 * into r0[7:0] as variant[7:4] and revision[3:0]:
	 *
	 * First extract r1[23:16] to r0[7:0] and zero fill the rest. Then
	 * extract r1[3:0] into r0[3:0] retaining other bits.
	 */
	ubfx	r0, r1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfi	r0, r1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	bx	lr
endfunc cpu_get_rev_var

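/*
 * For example (illustrative values): a core whose MIDR reports variant 0x2
 * and revision 0x1 (an r2p1 part) returns r0 = 0x21.
 */
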
/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, the errata applies; otherwise it does not.
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	cmp	r0, r1
	movls	r0, #ERRATA_APPLIES
	movhi	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_ls

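/*
 * For example (illustrative values): an erratum affecting revisions up to and
 * including r1p2 is checked with r1 = 0x12; a core at r1p3 (r0 = 0x13) gets
 * ERRATA_NOT_APPLIES, while one at r0p4 (r0 = 0x04) gets ERRATA_APPLIES.
 */
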
/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is greater than or equal to
 * the given value, the errata applies; otherwise it does not.
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	cmp	r0, r1
	movge	r0, #ERRATA_APPLIES
	movlt	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_hs
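
/*
 * For example (illustrative values): an erratum present from r2p0 onwards is
 * checked with r1 = 0x20; a core at r2p1 (r0 = 0x21) gets ERRATA_APPLIES,
 * while one at r1p0 (r0 = 0x10) gets ERRATA_NOT_APPLIES.
 */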