xref: /rk3399_ARM-atf/bl31/aarch64/bl31_entrypoint.S (revision 18f2efd67d881fe0a9a535ce9e801e60d746e024)
14f6ad66aSAchin Gupta/*
2d50ece03SAntonio Nino Diaz * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
34f6ad66aSAchin Gupta *
482cb2c1aSdp-arm * SPDX-License-Identifier: BSD-3-Clause
54f6ad66aSAchin Gupta */
64f6ad66aSAchin Gupta
7c10bd2ceSSandrine Bailleux#include <arch.h>
897043ac9SDan Handley#include <bl_common.h>
952010cc7SSandrine Bailleux#include <el3_common_macros.S>
10872be88aSdp-arm#include <pmf_asm_macros.S>
11872be88aSdp-arm#include <runtime_instr.h>
12d50ece03SAntonio Nino Diaz#include <xlat_tables_defs.h>
134f6ad66aSAchin Gupta
144f6ad66aSAchin Gupta	.globl	bl31_entrypoint
15cf0b1492SSoby Mathew	.globl	bl31_warm_entrypoint
164f6ad66aSAchin Gupta
174f6ad66aSAchin Gupta	/* -----------------------------------------------------
184f6ad66aSAchin Gupta	 * bl31_entrypoint() is the cold boot entrypoint,
194f6ad66aSAchin Gupta	 * executed only by the primary cpu.
204f6ad66aSAchin Gupta	 * -----------------------------------------------------
214f6ad66aSAchin Gupta	 */
224f6ad66aSAchin Gupta
230a30cf54SAndrew Thoelkefunc bl31_entrypoint
2452010cc7SSandrine Bailleux#if !RESET_TO_BL31
254112bfa0SVikram Kanigiri	/* ---------------------------------------------------------------
264112bfa0SVikram Kanigiri	 * Preceding bootloader has populated x0 with a pointer to a
274112bfa0SVikram Kanigiri	 * 'bl31_params' structure & x1 with a pointer to platform
284112bfa0SVikram Kanigiri	 * specific structure
294112bfa0SVikram Kanigiri	 * ---------------------------------------------------------------
30c10bd2ceSSandrine Bailleux	 */
	/*
	 * Stash the incoming arguments in x20/x21: these are callee-saved
	 * registers under AAPCS64, so they survive the function calls made
	 * inside el3_entrypoint_common below and can be relayed to the
	 * platform layer afterwards.
	 */
3129fb905dSVikram Kanigiri	mov	x20, x0
3229fb905dSVikram Kanigiri	mov	x21, x1
33c10bd2ceSSandrine Bailleux
344f603683SHarry Liebel	/* ---------------------------------------------------------------------
3552010cc7SSandrine Bailleux	 * For !RESET_TO_BL31 systems, only the primary CPU ever reaches
3652010cc7SSandrine Bailleux	 * bl31_entrypoint() during the cold boot flow, so the cold/warm boot
3752010cc7SSandrine Bailleux	 * and primary/secondary CPU logic should not be executed in this case.
384f603683SHarry Liebel	 *
39*18f2efd6SDavid Cunado	 * Also, assume that the previous bootloader has already initialised the
40*18f2efd6SDavid Cunado	 * SCTLR_EL3, including the endianness, and has initialised the memory.
414f603683SHarry Liebel	 * ---------------------------------------------------------------------
424f603683SHarry Liebel	 */
4352010cc7SSandrine Bailleux	el3_entrypoint_common					\
44*18f2efd6SDavid Cunado		_init_sctlr=0					\
4552010cc7SSandrine Bailleux		_warm_boot_mailbox=0				\
4652010cc7SSandrine Bailleux		_secondary_cold_boot=0				\
4752010cc7SSandrine Bailleux		_init_memory=0					\
4852010cc7SSandrine Bailleux		_init_c_runtime=1				\
4952010cc7SSandrine Bailleux		_exception_vectors=runtime_exceptions
504f603683SHarry Liebel
5152010cc7SSandrine Bailleux	/* ---------------------------------------------------------------------
5252010cc7SSandrine Bailleux	 * Relay the previous bootloader's arguments to the platform layer
5352010cc7SSandrine Bailleux	 * ---------------------------------------------------------------------
5403396c43SVikram Kanigiri	 */
5552010cc7SSandrine Bailleux	mov	x0, x20
5652010cc7SSandrine Bailleux	mov	x1, x21
5752010cc7SSandrine Bailleux#else
58bf031bbaSSandrine Bailleux	/* ---------------------------------------------------------------------
59bf031bbaSSandrine Bailleux	 * For RESET_TO_BL31 systems which have a programmable reset address,
60bf031bbaSSandrine Bailleux	 * bl31_entrypoint() is executed only on the cold boot path so we can
61bf031bbaSSandrine Bailleux	 * skip the warm boot mailbox mechanism.
62bf031bbaSSandrine Bailleux	 * ---------------------------------------------------------------------
63bf031bbaSSandrine Bailleux	 */
6452010cc7SSandrine Bailleux	el3_entrypoint_common					\
65*18f2efd6SDavid Cunado		_init_sctlr=1					\
66bf031bbaSSandrine Bailleux		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
67a9bec67dSSandrine Bailleux		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
6852010cc7SSandrine Bailleux		_init_memory=1					\
6952010cc7SSandrine Bailleux		_init_c_runtime=1				\
7052010cc7SSandrine Bailleux		_exception_vectors=runtime_exceptions
714f6ad66aSAchin Gupta
7252010cc7SSandrine Bailleux	/* ---------------------------------------------------------------------
73d178637dSJuan Castillo	 * For RESET_TO_BL31 systems, BL31 is the first bootloader to run so
7452010cc7SSandrine Bailleux	 * there's no argument to relay from a previous bootloader. Zero the
7552010cc7SSandrine Bailleux	 * arguments passed to the platform layer to reflect that.
7652010cc7SSandrine Bailleux	 * ---------------------------------------------------------------------
7765f546a1SSandrine Bailleux	 */
7852010cc7SSandrine Bailleux	mov	x0, 0
7952010cc7SSandrine Bailleux	mov	x1, 0
8052010cc7SSandrine Bailleux#endif /* RESET_TO_BL31 */
814f6ad66aSAchin Gupta
824f6ad66aSAchin Gupta	/* ---------------------------------------------
834f6ad66aSAchin Gupta	 * Perform platform specific early arch. setup
844f6ad66aSAchin Gupta	 * ---------------------------------------------
854f6ad66aSAchin Gupta	 */
864f6ad66aSAchin Gupta	bl	bl31_early_platform_setup
874f6ad66aSAchin Gupta	bl	bl31_plat_arch_setup
884f6ad66aSAchin Gupta
894f6ad66aSAchin Gupta	/* ---------------------------------------------
904f6ad66aSAchin Gupta	 * Jump to main function.
914f6ad66aSAchin Gupta	 * ---------------------------------------------
924f6ad66aSAchin Gupta	 */
934f6ad66aSAchin Gupta	bl	bl31_main
944f6ad66aSAchin Gupta
954f6ad66aSAchin Gupta	/* -------------------------------------------------------------
9654dc71e7SAchin Gupta	 * Clean the .data & .bss sections to main memory. This ensures
9754dc71e7SAchin Gupta	 * that any global data which was initialised by the primary CPU
9854dc71e7SAchin Gupta	 * is visible to secondary CPUs before they enable their data
9954dc71e7SAchin Gupta	 * caches and participate in coherency.
10054dc71e7SAchin Gupta	 * -------------------------------------------------------------
10154dc71e7SAchin Gupta	 */
	/* clean_dcache_range(base=x0, size=x1) for the .data section */
10254dc71e7SAchin Gupta	adr	x0, __DATA_START__
10354dc71e7SAchin Gupta	adr	x1, __DATA_END__
10454dc71e7SAchin Gupta	sub	x1, x1, x0
10554dc71e7SAchin Gupta	bl	clean_dcache_range
10654dc71e7SAchin Gupta
	/* Same for the .bss section */
10754dc71e7SAchin Gupta	adr	x0, __BSS_START__
10854dc71e7SAchin Gupta	adr	x1, __BSS_END__
10954dc71e7SAchin Gupta	sub	x1, x1, x0
11054dc71e7SAchin Gupta	bl	clean_dcache_range
11154dc71e7SAchin Gupta
	/*
	 * NOTE(review): el3_exit is expected to restore the EL3 context
	 * prepared during bl31_main() and ERET to the next image -- confirm
	 * against the context management / runtime exceptions code.
	 */
112caa84939SJeenu Viswambharan	b	el3_exit
1138b779620SKévin Petitendfunc bl31_entrypoint
114cf0b1492SSoby Mathew
115cf0b1492SSoby Mathew	/* --------------------------------------------------------------------
116cf0b1492SSoby Mathew	 * This CPU has been physically powered up. It is either resuming from
117cf0b1492SSoby Mathew	 * suspend or has simply been turned on. In both cases, call the BL31
118cf0b1492SSoby Mathew	 * warmboot entrypoint
119cf0b1492SSoby Mathew	 * --------------------------------------------------------------------
120cf0b1492SSoby Mathew	 */
121cf0b1492SSoby Mathewfunc bl31_warm_entrypoint
122872be88aSdp-arm#if ENABLE_RUNTIME_INSTRUMENTATION
123872be88aSdp-arm
124872be88aSdp-arm	/*
125872be88aSdp-arm	 * This timestamp update happens with cache off.  The next
126872be88aSdp-arm	 * timestamp collection will need to do cache maintenance prior
127872be88aSdp-arm	 * to timestamp update.
128872be88aSdp-arm	 */
	/* x0 = address of the RT_INSTR_EXIT_HW_LOW_PWR timestamp slot */
129872be88aSdp-arm	pmf_calc_timestamp_addr rt_instr_svc RT_INSTR_EXIT_HW_LOW_PWR
	/* Record the current counter value into that slot */
130872be88aSdp-arm	mrs	x1, cntpct_el0
131872be88aSdp-arm	str	x1, [x0]
132872be88aSdp-arm#endif
133872be88aSdp-arm
134cf0b1492SSoby Mathew	/*
135cf0b1492SSoby Mathew	 * On the warm boot path, most of the EL3 initialisations performed by
136cf0b1492SSoby Mathew	 * 'el3_entrypoint_common' must be skipped:
137cf0b1492SSoby Mathew	 *
138cf0b1492SSoby Mathew	 *  - Only when the platform bypasses the BL1/BL31 entrypoint by
139*18f2efd6SDavid Cunado	 *    programming the reset address do we need to initialise SCTLR_EL3.
140cf0b1492SSoby Mathew	 *    In other cases, we assume this has been taken care by the
141cf0b1492SSoby Mathew	 *    entrypoint code.
142cf0b1492SSoby Mathew	 *
143cf0b1492SSoby Mathew	 *  - No need to determine the type of boot, we know it is a warm boot.
144cf0b1492SSoby Mathew	 *
145cf0b1492SSoby Mathew	 *  - Do not try to distinguish between primary and secondary CPUs, this
146cf0b1492SSoby Mathew	 *    notion only exists for a cold boot.
147cf0b1492SSoby Mathew	 *
148cf0b1492SSoby Mathew	 *  - No need to initialise the memory or the C runtime environment,
149cf0b1492SSoby Mathew	 *    it has been done once and for all on the cold boot path.
150cf0b1492SSoby Mathew	 */
151cf0b1492SSoby Mathew	el3_entrypoint_common					\
152*18f2efd6SDavid Cunado		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
153cf0b1492SSoby Mathew		_warm_boot_mailbox=0				\
154cf0b1492SSoby Mathew		_secondary_cold_boot=0				\
155cf0b1492SSoby Mathew		_init_memory=0					\
156cf0b1492SSoby Mathew		_init_c_runtime=0				\
157cf0b1492SSoby Mathew		_exception_vectors=runtime_exceptions
158cf0b1492SSoby Mathew
15925a93f7cSJeenu Viswambharan	/*
16025a93f7cSJeenu Viswambharan	 * We're about to enable MMU and participate in PSCI state coordination.
16125a93f7cSJeenu Viswambharan	 *
16225a93f7cSJeenu Viswambharan	 * The PSCI implementation invokes platform routines that enable CPUs to
16325a93f7cSJeenu Viswambharan	 * participate in coherency. On a system where CPUs are not
164bcc3c49cSSoby Mathew	 * cache-coherent without appropriate platform specific programming,
165bcc3c49cSSoby Mathew	 * having caches enabled until such time might lead to coherency issues
166bcc3c49cSSoby Mathew	 * (resulting from stale data getting speculatively fetched, among
167bcc3c49cSSoby Mathew	 * others). Therefore we keep data caches disabled even after enabling
168bcc3c49cSSoby Mathew	 * the MMU for such platforms.
16925a93f7cSJeenu Viswambharan	 *
170bcc3c49cSSoby Mathew	 * On systems with hardware-assisted coherency, or on single cluster
171bcc3c49cSSoby Mathew	 * platforms, such platform specific programming is not required to
172bcc3c49cSSoby Mathew	 * enter coherency (as CPUs already are); and there's no reason to have
173bcc3c49cSSoby Mathew	 * caches disabled either.
174cf0b1492SSoby Mathew	 */
175cf0b1492SSoby Mathew	mov	x0, #DISABLE_DCACHE
176cf0b1492SSoby Mathew	bl	bl31_plat_enable_mmu
177cf0b1492SSoby Mathew
178bcc3c49cSSoby Mathew#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	/*
	 * As explained above, it is safe on these platforms to turn the data
	 * cache on straight away: set SCTLR_EL3.C.
	 */
179bcc3c49cSSoby Mathew	mrs	x0, sctlr_el3
180bcc3c49cSSoby Mathew	orr	x0, x0, #SCTLR_C_BIT
181bcc3c49cSSoby Mathew	msr	sctlr_el3, x0
182bcc3c49cSSoby Mathew	isb
183bcc3c49cSSoby Mathew#endif
184bcc3c49cSSoby Mathew
185cf0b1492SSoby Mathew	bl	psci_warmboot_entrypoint
186cf0b1492SSoby Mathew
187872be88aSdp-arm#if ENABLE_RUNTIME_INSTRUMENTATION
188872be88aSdp-arm	pmf_calc_timestamp_addr rt_instr_svc RT_INSTR_EXIT_PSCI
	/*
	 * Keep the timestamp slot address in callee-saved x19 so it survives
	 * the inv_dcache_range call below.
	 */
189872be88aSdp-arm	mov	x19, x0
190872be88aSdp-arm
191872be88aSdp-arm	/*
192872be88aSdp-arm	 * Invalidate before updating timestamp to ensure previous timestamp
193872be88aSdp-arm	 * updates on the same cache line with caches disabled are properly
194872be88aSdp-arm	 * seen by the same core. Without the cache invalidate, the core might
195872be88aSdp-arm	 * write into a stale cache line.
196872be88aSdp-arm	 */
	/* inv_dcache_range(base=x0, size=x1): invalidate one timestamp entry */
197872be88aSdp-arm	mov	x1, #PMF_TS_SIZE
	/* Preserve the link register in x20: the 'bl' below clobbers x30 */
198872be88aSdp-arm	mov	x20, x30
199872be88aSdp-arm	bl	inv_dcache_range
200872be88aSdp-arm	mov	x30, x20
201872be88aSdp-arm
	/* Store the current counter value into the invalidated slot */
202872be88aSdp-arm	mrs	x0, cntpct_el0
203872be88aSdp-arm	str	x0, [x19]
204872be88aSdp-arm#endif
205cf0b1492SSoby Mathew	b	el3_exit
206cf0b1492SSoby Mathewendfunc bl31_warm_entrypoint
207