xref: /rk3399_ARM-atf/lib/cpus/aarch32/cpu_helpers.S (revision e33b78a658bd54a815c780e17c2d0073db6f59db)
/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_data.h>
#include <cpu_macros.S>

	/*
	 * The reset handler common to all platforms.  After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked. The reset handler is invoked very early
	 * in the boot sequence and it is assumed that we can clobber r0 - r10
	 * without the need to follow AAPCS.
	 * Clobbers: r0 - r10
	 */
	.globl	reset_handler
func reset_handler
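	/*
	 * Stash the return address in r10; no stack is assumed to be
	 * available this early and the bl calls below clobber lr.
	 */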
	mov	r10, lr

	/* The plat_reset_handler can clobber r0 - r9 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
	bl	get_cpu_ops_ptr

#if ASM_ASSERTION
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	r1, [r0, #CPU_RESET_FUNC]
	cmp	r1, #0
	mov	lr, r10
	bxne	r1
	bx	lr
endfunc reset_handler
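
	/*
	 * The helpers in this file walk a table of cpu_ops entries that the
	 * linker places between __CPU_OPS_START__ and __CPU_OPS_END__,
	 * typically emitted by the declare_cpu_ops macro in cpu_macros.S,
	 * and index the fields using the CPU_MIDR, CPU_RESET_FUNC,
	 * CPU_PWR_DWN_CORE and CPU_PWR_DWN_CLUSTER offsets. As an
	 * illustrative sketch only (the entries are laid out by assembler
	 * macros, not declared in C), each entry can be pictured as:
	 *
	 *	struct cpu_ops {
	 *		unsigned int midr;              offset CPU_MIDR
	 *		void (*reset_func)(void);       offset CPU_RESET_FUNC
	 *		void (*core_pwr_dwn)(void);     offset CPU_PWR_DWN_CORE
	 *		void (*cluster_pwr_dwn)(void);  offset CPU_PWR_DWN_CLUSTER
	 *	};
	 */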

	/*
	 * The prepare core power down function for all platforms.  After
	 * the cpu_ops pointer is retrieved from cpu_data, the corresponding
	 * core_pwr_dwn handler in the cpu_ops is invoked. Follows AAPCS.
	 */
	.globl	prepare_core_pwr_dwn
func prepare_core_pwr_dwn
	push	{lr}
	bl	_cpu_data
	pop	{lr}

	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION
	cmp	r1, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops core_pwr_dwn handler */
	ldr	r0, [r1, #CPU_PWR_DWN_CORE]
	bx	r0
endfunc prepare_core_pwr_dwn
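
	/*
	 * Hypothetical C rendering of the function above (field and type
	 * names are illustrative only). Since lr is restored before the bx,
	 * the selected handler returns directly to the caller of
	 * prepare_core_pwr_dwn:
	 *
	 *	void prepare_core_pwr_dwn(void)
	 *	{
	 *		struct cpu_ops *ops = _cpu_data()->cpu_ops_ptr;
	 *		ops->core_pwr_dwn();
	 *	}
	 */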

	/*
	 * The prepare cluster power down function for all platforms.  After
	 * the cpu_ops pointer is retrieved from cpu_data, the corresponding
	 * cluster_pwr_dwn handler in the cpu_ops is invoked. Follows AAPCS.
	 */
	.globl	prepare_cluster_pwr_dwn
func prepare_cluster_pwr_dwn
	push	{lr}
	bl	_cpu_data
	pop	{lr}

	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ASM_ASSERTION
	cmp	r1, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops cluster_pwr_dwn handler */
	ldr	r0, [r1, #CPU_PWR_DWN_CLUSTER]
	bx	r0
endfunc prepare_cluster_pwr_dwn
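
	/*
	 * prepare_cluster_pwr_dwn mirrors prepare_core_pwr_dwn, dispatching
	 * to the handler stored at CPU_PWR_DWN_CLUSTER instead. As a hedged
	 * sketch only, a hypothetical caller in the power management code
	 * would pick one of the two based on the deepest power level being
	 * turned off:
	 *
	 *	if (power_level == 0)
	 *		prepare_core_pwr_dwn();
	 *	else
	 *		prepare_cluster_pwr_dwn();
	 */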

	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This must only be called after the data cache
	 * is enabled. AAPCS is followed.
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	push	{r4 - r6, lr}
	bl	_cpu_data
	mov	r6, r0
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
	cmp	r1, #0
	bne	1f
	bl	get_cpu_ops_ptr
#if ASM_ASSERTION
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
	str	r0, [r6, #CPU_DATA_CPU_OPS_PTR]
1:
	pop	{r4 - r6, pc}
endfunc init_cpu_ops
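
	/*
	 * From C this is simply a void function with no arguments, e.g.
	 * (hypothetical caller shown for illustration only):
	 *
	 *	void platform_setup(void)
	 *	{
	 *		init_cpu_ops();
	 *	}
	 *
	 * It is intended to run once per CPU after the data cache is enabled,
	 * so that the power-down helpers above can later read the cached
	 * pointer from cpu_data instead of re-scanning the cpu_ops table.
	 */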

	/*
	 * The below function returns the cpu_ops structure matching the
	 * MIDR of the core. It reads the MIDR and finds the matching
	 * entry in the cpu_ops list. Only the implementer and part number
	 * fields of the MIDR are used for the match.
	 * Return :
	 *     r0 - The matching cpu_ops pointer on success
	 *     r0 - 0 on failure.
	 * Clobbers: r0 - r5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	ldr	r4, =(__CPU_OPS_START__ + CPU_MIDR)
	ldr	r5, =(__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	r0, #0

	/* Read the MIDR */
	ldcopr	r2, MIDR
	ldr	r3, =CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
	and	r2, r2, r3
1:
	/* Check if we have reached end of list */
	cmp	r4, r5
	bge	error_exit

	/* Load the midr from the cpu_ops entry and advance r4 to the next entry */
	ldr	r1, [r4], #CPU_OPS_SIZE
	and	r1, r1, r3

	/* Check if midr matches to midr of this core */
	cmp	r1, r2
	bne	1b

	/* Subtract the increment and offset to get the cpu-ops pointer */
	sub	r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
error_exit:
	bx	lr
endfunc get_cpu_ops_ptr
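
	/*
	 * Illustrative C equivalent of the search above (names are for
	 * explanation only; the real table is laid out by assembler macros):
	 *
	 *	struct cpu_ops *get_cpu_ops_ptr(void)
	 *	{
	 *		unsigned int midr = read_midr() & CPU_IMPL_PN_MASK;
	 *		struct cpu_ops *ops;
	 *
	 *		for (ops = __CPU_OPS_START__; ops < __CPU_OPS_END__; ops++)
	 *			if ((ops->midr & CPU_IMPL_PN_MASK) == midr)
	 *				return ops;
	 *		return NULL;
	 *	}
	 */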