xref: /rk3399_ARM-atf/lib/per_cpu/aarch64/per_cpu_asm.S (revision 7303319b3823e9e33748d963e9173f3678aba4da)
/*
 * Copyright (c) 2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef PER_CPU_ASM_S
#define PER_CPU_ASM_S

#include <arch.h>
#include <asm_macros.S>
#include <lib/per_cpu/per_cpu_defs.h>

.globl per_cpu_base

/* -----------------------------------------------------------------
 * Gets the per-CPU base address for a particular CPU. When NUMA
 * awareness is enabled, it is the platform's responsibility to
 * implement plat_per_cpu_base; a hypothetical sketch follows this
 * function.
 *
 * This function may be called from both assembly and C routines. If
 * NUMA awareness is enabled, the platform implementation must take
 * care to preserve this function's clobber list.
 *
 * args - cpu in x0
 * ret  - per-CPU base address in x0
 * -----------------------------------------------------------------
 */
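/*
 * Worked example of the single-node computation below, using
 * hypothetical numbers: if __PER_CPU_START__ resolved to 0x04000000
 * and one per-CPU unit (__PER_CPU_UNIT_END__ - __PER_CPU_START__)
 * were 0x200 bytes, then for cpu = 2:
 *   per_cpu_base(2) = 0x04000000 + 2 * 0x200 = 0x04000400
 */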
func per_cpu_base
#if PLATFORM_NODE_COUNT == 1
	adr_l	x1, __PER_CPU_START__
	adr_l	x2, __PER_CPU_UNIT_END__
	/* x2 = size of one per-CPU unit */
	sub	x2, x2, x1
	/* x0 = __PER_CPU_START__ + cpu * per-CPU unit size */
	madd	x0, x2, x0, x1
	ret
#else
	/*
	 * Intentionally using 'b' instead of 'bl' to avoid creating a
	 * return address. This saves the link register (LR) from being
	 * clobbered and reduces the clobber list in the calling
	 * context. Any future update that appends code after this
	 * branch would require changing it from 'b' to 'bl'.
	 */
	b	plat_per_cpu_base
#endif
endfunc per_cpu_base
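
/*
 * A minimal sketch, kept under '#if 0' purely for illustration, of
 * what a platform might provide for plat_per_cpu_base when NUMA
 * awareness is enabled (PLATFORM_NODE_COUNT > 1). PLAT_CPUS_PER_NODE
 * and the per-node base table plat_per_cpu_node_bases are
 * hypothetical names, not part of this framework; a real port would
 * derive the node from its own topology and must honour the clobber
 * expectations of per_cpu_base's callers.
 */
#if 0
func plat_per_cpu_base
	/*
	 * per_cpu_base tail-calls here with 'b', so LR still holds the
	 * original caller's return address and must not be clobbered.
	 */
	mov	x2, #PLAT_CPUS_PER_NODE		/* hypothetical: CPUs per node */
	udiv	x3, x0, x2			/* x3 = node id of this cpu */
	msub	x0, x3, x2, x0			/* x0 = cpu index within node */
	adr_l	x1, plat_per_cpu_node_bases	/* hypothetical per-node table */
	ldr	x1, [x1, x3, lsl #3]		/* x1 = node's per-CPU region base */
	adr_l	x2, __PER_CPU_START__
	adr_l	x3, __PER_CPU_UNIT_END__
	sub	x3, x3, x2			/* x3 = size of one per-CPU unit */
	madd	x0, x3, x0, x1			/* x0 = node base + cpu * unit size */
	ret
endfunc plat_per_cpu_base
#endif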

#endif /* PER_CPU_ASM_S */