/*
 * Copyright (c) 2013, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <platform.h>


	.globl	pcpu_dv_mem_stack
	.weak	platform_get_core_pos
	.weak	platform_set_stack
	.weak	platform_is_primary_cpu
	.weak	platform_set_coherent_stack
	.weak	platform_check_mpidr
	.weak	plat_report_exception

	/* -----------------------------------------------------
	 * 512 bytes of coherent stack for each cpu
	 * -----------------------------------------------------
	 */
#define PCPU_DV_MEM_STACK_SIZE	0x200


	.section	.text, "ax"; .align 3

	/* -----------------------------------------------------
	 * unsigned long long platform_set_coherent_stack
	 *                                    (unsigned mpidr);
	 * For a given mpidr, this function sets the stack
	 * pointer to a stack allocated in device memory. This
	 * stack can be used by C code which enables/disables
	 * the SCTLR.M or SCTLR.C bits, e.g. while powering
	 * down a cpu.
	 * -----------------------------------------------------
	 */
platform_set_coherent_stack:; .type platform_set_coherent_stack, %function
	mov	x5, x30 // lr
	bl	platform_get_core_pos
	add	x0, x0, #1
	mov	x1, #PCPU_DV_MEM_STACK_SIZE
	mul	x0, x0, x1
	ldr	x1, =pcpu_dv_mem_stack
	add	sp, x1, x0
	ret	x5
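
	/* -----------------------------------------------------
	 * Note: each cpu owns one PCPU_DV_MEM_STACK_SIZE (0x200
	 * byte) slot in pcpu_dv_mem_stack. The core position is
	 * incremented before the multiply so that sp lands on
	 * the highest address of this cpu's slot, since the
	 * stack grows downwards. For example, core position 2
	 * gives sp = pcpu_dv_mem_stack + 3 * 0x200, i.e. the
	 * top of the third slot.
	 * -----------------------------------------------------
	 */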


	/* -----------------------------------------------------
	 *  int platform_get_core_pos(int mpidr);
	 *  With this function: CorePos = (ClusterId * 4) + CoreId
	 * -----------------------------------------------------
	 */
platform_get_core_pos:; .type platform_get_core_pos, %function
	and	x1, x0, #MPIDR_CPU_MASK
	and	x0, x0, #MPIDR_CLUSTER_MASK
	add	x0, x1, x0, LSR #6
	ret
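
	/* -----------------------------------------------------
	 * Worked example (assuming MPIDR_CPU_MASK = 0xff and
	 * MPIDR_CLUSTER_MASK = 0xff00 from arch.h): for
	 * mpidr = 0x101 (ClusterId 1, CoreId 1), x1 = 0x1 and
	 * x0 = 0x100. Shifting 0x100 right by 6 yields 4, i.e.
	 * ClusterId * 4, so the core position is 1 + 4 = 5.
	 * -----------------------------------------------------
	 */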


	/* -----------------------------------------------------
	 * unsigned int platform_is_primary_cpu (unsigned int mpidr);
	 *
	 * Given the mpidr, return 1 if this cpu is the primary
	 * cpu (applicable only after a cold boot), 0 otherwise.
	 * -----------------------------------------------------
	 */
platform_is_primary_cpu:; .type platform_is_primary_cpu, %function
	and	x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
	cmp	x0, #PRIMARY_CPU
	cset	x0, eq
	ret
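
	/* -----------------------------------------------------
	 * Note: PRIMARY_CPU is provided by the platform port.
	 * As an illustration, if PRIMARY_CPU were 0x0 (cluster
	 * 0, core 0), an mpidr of 0x80000000 masks down to 0x0
	 * and the function returns 1, while an mpidr of 0x101
	 * masks to 0x101 and returns 0.
	 * -----------------------------------------------------
	 */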


	/* -----------------------------------------------------
	 * void platform_set_stack (int mpidr)
	 * -----------------------------------------------------
	 */
platform_set_stack:; .type platform_set_stack, %function
	mov	x9, x30 // lr
	bl	platform_get_core_pos
	add	x0, x0, #1
	mov	x1, #PLATFORM_STACK_SIZE
	mul	x0, x0, x1
	ldr	x1, =platform_normal_stacks
	add	sp, x1, x0
	ret	x9

	/* -----------------------------------------------------
	 * Placeholder function which should be redefined by
	 * each platform.
	 * -----------------------------------------------------
	 */
platform_check_mpidr:; .type platform_check_mpidr, %function
	mov	x0, xzr
	ret

	/* -----------------------------------------------------
	 * Placeholder function which should be redefined by
	 * each platform.
	 * -----------------------------------------------------
	 */
plat_report_exception:
	ret

	/* -----------------------------------------------------
	 * Per-cpu stacks in device memory.
	 * Used for C code just before power down or right after
	 * power up when the MMU or caches need to be turned on
	 * or off. Each cpu gets a stack of 512 bytes.
	 * -----------------------------------------------------
	 */
	.section	tzfw_coherent_mem, "aw", %nobits; .align 6

pcpu_dv_mem_stack:
	/* Zero fill */
	.space (PLATFORM_CORE_COUNT * PCPU_DV_MEM_STACK_SIZE), 0
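
	/* -----------------------------------------------------
	 * Note: the block above reserves PLATFORM_CORE_COUNT *
	 * 0x200 bytes in the (uninitialised) tzfw_coherent_mem
	 * section; for example, a platform with 8 cpus would
	 * reserve 4KB. The ".align 6" places the block on a
	 * 64-byte boundary.
	 * -----------------------------------------------------
	 */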