/*
 * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
#include <common/bl_common.h>
#include <lib/cpus/cpu_ops.h>
#include <lib/el3_runtime/cpu_data.h>

#if defined(IMAGE_BL1) || defined(IMAGE_BL32) || \
	(defined(IMAGE_BL2) && RESET_TO_BL2)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked. The reset handler is invoked very early
	 * in the boot sequence and it is assumed that we can clobber r0 - r10
	 * without the need to follow AAPCS.
	 * Clobbers: r0 - r10
	 */
	.globl	reset_handler
func reset_handler
	mov	r8, lr

	/* The plat_reset_handler can clobber r0 - r7 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
	bl	get_cpu_ops_ptr

#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	r1, [r0, #CPU_RESET_FUNC]
	cmp	r1, #0
	mov	lr, r8
	bxne	r1
	bx	lr
endfunc reset_handler
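
/*
 * For reference, each CPU library file registers its handlers through the
 * declare_cpu_ops macro from cpu_macros.S, which is what populates the
 * cpu_ops entries searched above. A minimal sketch for a hypothetical part
 * (the cpu_axx names and MIDR symbol below are illustrative, not taken
 * from an actual CPU file):
 *
 *	func cpu_axx_reset_func
 *		...			@ apply errata, set auxiliary controls
 *		bx	lr
 *	endfunc cpu_axx_reset_func
 *
 *	declare_cpu_ops cpu_axx, CPU_AXX_MIDR, cpu_axx_reset_func, \
 *		cpu_axx_core_pwr_dwn, cpu_axx_cluster_pwr_dwn
 */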

#endif /* IMAGE_BL1 || IMAGE_BL32 || (IMAGE_BL2 && RESET_TO_BL2) */

#ifdef IMAGE_BL32
	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This must only be called after the data cache
	 * is enabled. AAPCS is followed.
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	push	{r4 - r6, lr}
	bl	_cpu_data
	mov	r6, r0

	/* Skip the lookup if cpu_ops_ptr is already cached in cpu_data */
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
	cmp	r1, #0
	bne	1f
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
	str	r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
1:
	pop	{r4 - r6, pc}
endfunc init_cpu_ops
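
/*
 * Typical usage (a sketch, not lifted from a specific caller): once the MMU
 * and the data cache have been enabled during boot, the image calls
 *
 *	bl	init_cpu_ops
 *
 * once per core, so that later lookups (e.g. the power-down hooks) can read
 * the cached CPU_DATA_CPU_OPS_PTR instead of re-scanning the cpu_ops list
 * with get_cpu_ops_ptr.
 */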

#endif /* IMAGE_BL32 */

	/*
	 * The below function returns the cpu_ops structure matching the
	 * MIDR of the core. It reads the MIDR and finds the matching
	 * entry in the cpu_ops list. Only the implementer and part number
	 * fields are used to match the entries.
	 * Return :
	 *     r0 - The matching cpu_ops pointer on success
	 *     r0 - 0 on failure
	 * Clobbers: r0 - r5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	ldr	r4, =(__CPU_OPS_START__ + CPU_MIDR)
	ldr	r5, =(__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	r0, #0

	/* Read the MIDR */
	ldcopr	r2, MIDR
	ldr	r3, =CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
	and	r2, r2, r3
1:
	/* Check if we have reached the end of the list */
	cmp	r4, r5
	bhs	error_exit

	/* Load the MIDR from the cpu_ops entry */
	ldr	r1, [r4], #CPU_OPS_SIZE
	and	r1, r1, r3

	/* Check if the MIDR matches this core's MIDR */
	cmp	r1, r2
	bne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
error_exit:
	bx	lr
endfunc get_cpu_ops_ptr
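
/*
 * Worked example of the match above: a Cortex-A53 r0p4 reads
 * MIDR = 0x410FD034. CPU_IMPL_PN_MASK (0xff00fff0) keeps only the
 * implementer (0x41) and part number (0xD03) fields, so both the core's
 * MIDR and each entry's CPU_MIDR reduce to 0x4100D030 and the compare
 * deliberately ignores the variant and revision fields.
 */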

/*
 * Extract the CPU revision and variant, and combine them into a single numeric
 * for easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	ldcopr	r1, MIDR

	/*
	 * Extract the variant[23:20] and revision[3:0] from r1 and pack them
	 * into r0[7:0] as variant[7:4] and revision[3:0]:
	 *
	 * First extract r1[23:16] to r0[7:0] and zero fill the rest. Then
	 * extract r1[3:0] into r0[3:0] retaining other bits.
	 */
	ubfx	r0, r1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfi	r0, r1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	bx	lr
endfunc cpu_get_rev_var
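
/*
 * Worked example: with MIDR = 0x410FD034 (an r0p4 part), the ubfx above
 * copies bits [23:16] (0x0F: variant 0x0 plus the architecture nibble)
 * into r0[7:0], and the bfi then overwrites r0[3:0] with the revision
 * 0x4, giving r0 = 0x04. An r2p1 part would yield r0 = 0x21.
 */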

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, the erratum applies; otherwise it does not.
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	cmp	r0, r1
	movls	r0, #ERRATA_APPLIES
	movhi	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_ls
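
/*
 * Example: an erratum documented for revisions <= r1p2 passes r1 = 0x12, so a
 * core reporting 0x11 (r1p1) gets ERRATA_APPLIES and one reporting 0x20 (r2p0)
 * gets ERRATA_NOT_APPLIES. A sketch of the pattern used by the per-CPU errata
 * files (the erratum number here is hypothetical):
 *
 *	func check_errata_123456
 *		mov	r1, #0x12	@ erratum applies up to r1p2
 *		b	cpu_rev_var_ls
 *	endfunc check_errata_123456
 */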

/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is greater than or equal to the
 * given value, the erratum applies; otherwise it does not.
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	cmp	r0, r1
	movhs	r0, #ERRATA_APPLIES
	movlo	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_hs
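
/*
 * Example: an erratum present from r0p3 onwards passes r1 = 0x03; a core
 * reporting 0x04 (r0p4) gets ERRATA_APPLIES, while 0x02 (r0p2) gets
 * ERRATA_NOT_APPLIES.
 */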