/*
 * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
#if IMAGE_BL31
#include <cpu_data.h>
#endif

 /* The reset handler is only needed in a BL image that executes at the reset vector */
#if IMAGE_BL1 || (IMAGE_BL31 && RESET_TO_BL31)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in that cpu_ops is invoked.
	 */
	.globl	reset_handler
func reset_handler
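	/*
	 * Stash the return address in x10: the bl/blr below clobber x30,
	 * and this routine may run before any stack is available.
	 */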
	mov	x10, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
#if ASM_ASSERTION
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
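	/* The reset handler is optional; skip the call if none is registered */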
	cbz	x2, 1f
	blr	x2
1:
	ret	x10
#endif /* IMAGE_BL1 || (IMAGE_BL31 && RESET_TO_BL31) */

	/*
	 * This function returns the cpu_ops structure matching the
	 * midr of the core. It reads MIDR_EL1 and searches the list of
	 * cpu_ops entries for a match. Only the implementer and part
	 * number fields are used to match the entries.
	 * Return :
	 *     x0 - The matching cpu_ops pointer on success
	 *     x0 - 0 on failure
	 * Clobbers : x0 - x5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/*
	 * Get the start and end of the linker-defined cpu_ops region.
	 * Both pointers are biased by CPU_MIDR so that they point
	 * directly at the midr field of each entry.
	 */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0

	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
	and	w2, w2, w3
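
	/*
	 * Walk the cpu_ops entries, comparing each entry's midr with
	 * that of this core, until a match or the end of the list.
	 */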
1:
	/* Check if we have reached the end of the list */
	cmp	x4, x5
	b.eq	error_exit

	/*
	 * Load the midr from the current cpu_ops entry; the post-indexed
	 * addressing advances x4 to the next entry's midr field.
	 */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if the midr matches the midr of this core */
	cmp	w1, w2
	b.ne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
error_exit:
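	/* On the error path, x0 still holds 0 from its initialization above */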
	ret